Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c            118
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c             60
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c         12
-rw-r--r--  drivers/infiniband/hw/cxgb4/ev.c             12
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c            22
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c             62
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h            126
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4fw_ri_api.h   812
-rw-r--r--  drivers/infiniband/hw/mlx4/ah.c               1
-rw-r--r--  drivers/infiniband/hw/mlx4/alias_GUID.c       2
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c              57
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c              3
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c           246
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h         26
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c               6
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c              90
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c              8
-rw-r--r--  drivers/infiniband/hw/mlx4/sysfs.c            6
-rw-r--r--  drivers/infiniband/hw/mlx5/mem.c              2
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c          13
20 files changed, 1015 insertions(+), 669 deletions(-)
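Every hunk that follows applies one mechanical rename: the old Chelsio prefix-style bit-field macros (S_FOO for the shift amount, M_FOO for the unshifted mask, V_FOO(x) to place a value into the field, G_FOO(x) to extract it, F_FOO for the one-bit flag form) become the suffix-style FOO_S, FOO_M, FOO_V(x), FOO_G(x) and FOO_F. A minimal sketch of the convention, using a made-up FOO field rather than any real register in these drivers:

	/* old prefix style, removed by this series */
	#define S_FOO		5				/* field starts at bit 5 */
	#define M_FOO		0x1f				/* 5-bit mask, unshifted */
	#define V_FOO(x)	((x) << S_FOO)			/* insert a value */
	#define G_FOO(x)	(((x) >> S_FOO) & M_FOO)	/* extract a value */
	#define F_FOO		V_FOO(1U)			/* single-bit flag form */

	/* new suffix style, introduced by this series */
	#define FOO_S		5
	#define FOO_M		0x1f
	#define FOO_V(x)	((x) << FOO_S)
	#define FOO_G(x)	(((x) >> FOO_S) & FOO_M)
	#define FOO_F		FOO_V(1U)

	u32 reg = FOO_V(7);	/* reg now holds 7 << 5 */
	u32 val = FOO_G(reg);	/* val == 7 again */

The generated code is identical; only the macro spelling changes, so each hunk below is a one-for-one textual substitution.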
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 9edc200b311d..57176ddd4c50 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -235,19 +235,19 @@ static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
 
 static void set_emss(struct c4iw_ep *ep, u16 opt)
 {
-	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
+	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
 		   ((AF_INET == ep->com.remote_addr.ss_family) ?
 		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
 		   sizeof(struct tcphdr);
 	ep->mss = ep->emss;
-	if (GET_TCPOPT_TSTAMP(opt))
+	if (TCPOPT_TSTAMP_G(opt))
 		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
 	if (ep->emss < 128)
 		ep->emss = 128;
 	if (ep->emss & 7)
 		PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
-		     GET_TCPOPT_MSS(opt), ep->mss, ep->emss);
-	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
+		     TCPOPT_MSS_G(opt), ep->mss, ep->emss);
+	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
 	     ep->mss, ep->emss);
 }
 
@@ -652,29 +652,29 @@ static int send_connect(struct c4iw_ep *ep)
 	if (win > RCV_BUFSIZ_M)
 		win = RCV_BUFSIZ_M;
 
-	opt0 = (nocong ? NO_CONG(1) : 0) |
+	opt0 = (nocong ? NO_CONG_F : 0) |
 	       KEEP_ALIVE_F |
-	       DELACK(1) |
+	       DELACK_F |
 	       WND_SCALE_V(wscale) |
 	       MSS_IDX_V(mtu_idx) |
 	       L2T_IDX_V(ep->l2t->idx) |
 	       TX_CHAN_V(ep->tx_chan) |
 	       SMAC_SEL_V(ep->smac_idx) |
-	       DSCP(ep->tos) |
+	       DSCP_V(ep->tos) |
 	       ULP_MODE_V(ULP_MODE_TCPDDP) |
 	       RCV_BUFSIZ_V(win);
 	opt2 = RX_CHANNEL_V(0) |
-	       CCTRL_ECN(enable_ecn) |
+	       CCTRL_ECN_V(enable_ecn) |
 	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
 	if (enable_tcp_timestamps)
-		opt2 |= TSTAMPS_EN(1);
+		opt2 |= TSTAMPS_EN_F;
 	if (enable_tcp_sack)
-		opt2 |= SACK_EN(1);
+		opt2 |= SACK_EN_F;
 	if (wscale && enable_tcp_window_scaling)
 		opt2 |= WND_SCALE_EN_F;
 	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
 		opt2 |= T5_OPT_2_VALID_F;
-		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
 		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
 	}
 	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
@@ -1042,7 +1042,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_ep *ep;
 	struct cpl_act_establish *req = cplhdr(skb);
 	unsigned int tid = GET_TID(req);
-	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
 	struct tid_info *t = dev->rdev.lldi.tids;
 
 	ep = lookup_atid(t, atid);
@@ -1258,8 +1258,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
 						    ep->hwtid));
 	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
-				       F_RX_DACK_CHANGE |
-				       V_RX_DACK_MODE(dack_mode));
+				       RX_DACK_CHANGE_F |
+				       RX_DACK_MODE_V(dack_mode));
 	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
 	c4iw_ofld_send(&ep->com.dev->rdev, skb);
 	return credits;
@@ -1751,7 +1751,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
 	memset(req, 0, sizeof(*req));
-	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
+	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
 	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
 	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
 				     ep->com.dev->rdev.lldi.ports[0],
@@ -1782,27 +1782,27 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	if (win > RCV_BUFSIZ_M)
 		win = RCV_BUFSIZ_M;
 
-	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
-		(nocong ? NO_CONG(1) : 0) |
+	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
+		(nocong ? NO_CONG_F : 0) |
 		KEEP_ALIVE_F |
-		DELACK(1) |
+		DELACK_F |
 		WND_SCALE_V(wscale) |
 		MSS_IDX_V(mtu_idx) |
 		L2T_IDX_V(ep->l2t->idx) |
 		TX_CHAN_V(ep->tx_chan) |
 		SMAC_SEL_V(ep->smac_idx) |
-		DSCP(ep->tos) |
+		DSCP_V(ep->tos) |
 		ULP_MODE_V(ULP_MODE_TCPDDP) |
 		RCV_BUFSIZ_V(win));
-	req->tcb.opt2 = (__force __be32) (PACE(1) |
-		TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
+	req->tcb.opt2 = (__force __be32) (PACE_V(1) |
+		TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
 		RX_CHANNEL_V(0) |
-		CCTRL_ECN(enable_ecn) |
+		CCTRL_ECN_V(enable_ecn) |
 		RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
 	if (enable_tcp_timestamps)
-		req->tcb.opt2 |= (__force __be32)TSTAMPS_EN(1);
+		req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
 	if (enable_tcp_sack)
-		req->tcb.opt2 |= (__force __be32)SACK_EN(1);
+		req->tcb.opt2 |= (__force __be32)SACK_EN_F;
 	if (wscale && enable_tcp_window_scaling)
 		req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
 	req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
@@ -2023,10 +2023,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct c4iw_ep *ep;
 	struct cpl_act_open_rpl *rpl = cplhdr(skb);
-	unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
+	unsigned int atid = TID_TID_G(AOPEN_ATID_G(
 					ntohl(rpl->atid_status)));
 	struct tid_info *t = dev->rdev.lldi.tids;
-	int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
+	int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
 	struct sockaddr_in *la;
 	struct sockaddr_in *ra;
 	struct sockaddr_in6 *la6;
@@ -2064,7 +2064,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	if (ep->com.local_addr.ss_family == AF_INET &&
 	    dev->rdev.lldi.enable_fw_ofld_conn) {
 		send_fw_act_open_req(ep,
-				     GET_TID_TID(GET_AOPEN_ATID(
+				     TID_TID_G(AOPEN_ATID_G(
 				     ntohl(rpl->atid_status))));
 		return 0;
 	}
@@ -2181,39 +2181,39 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 	win = ep->rcv_win >> 10;
 	if (win > RCV_BUFSIZ_M)
 		win = RCV_BUFSIZ_M;
-	opt0 = (nocong ? NO_CONG(1) : 0) |
+	opt0 = (nocong ? NO_CONG_F : 0) |
 	       KEEP_ALIVE_F |
-	       DELACK(1) |
+	       DELACK_F |
 	       WND_SCALE_V(wscale) |
 	       MSS_IDX_V(mtu_idx) |
 	       L2T_IDX_V(ep->l2t->idx) |
 	       TX_CHAN_V(ep->tx_chan) |
 	       SMAC_SEL_V(ep->smac_idx) |
-	       DSCP(ep->tos >> 2) |
+	       DSCP_V(ep->tos >> 2) |
 	       ULP_MODE_V(ULP_MODE_TCPDDP) |
 	       RCV_BUFSIZ_V(win);
 	opt2 = RX_CHANNEL_V(0) |
 	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
 
 	if (enable_tcp_timestamps && req->tcpopt.tstamp)
-		opt2 |= TSTAMPS_EN(1);
+		opt2 |= TSTAMPS_EN_F;
 	if (enable_tcp_sack && req->tcpopt.sack)
-		opt2 |= SACK_EN(1);
+		opt2 |= SACK_EN_F;
 	if (wscale && enable_tcp_window_scaling)
 		opt2 |= WND_SCALE_EN_F;
 	if (enable_ecn) {
 		const struct tcphdr *tcph;
 		u32 hlen = ntohl(req->hdr_len);
 
-		tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
-			G_IP_HDR_LEN(hlen);
+		tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
+			IP_HDR_LEN_G(hlen);
 		if (tcph->ece && tcph->cwr)
-			opt2 |= CCTRL_ECN(1);
+			opt2 |= CCTRL_ECN_V(1);
 	}
 	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
 		u32 isn = (prandom_u32() & ~7UL) - 1;
 		opt2 |= T5_OPT_2_VALID_F;
-		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
 		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
 		rpl5 = (void *)rpl;
 		memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
@@ -2245,8 +2245,8 @@ static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype,
 		       __u8 *local_ip, __u8 *peer_ip,
 		       __be16 *local_port, __be16 *peer_port)
 {
-	int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
-	int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
+	int eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+	int ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
 	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
 	struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
 	struct tcphdr *tcp = (struct tcphdr *)
@@ -2277,7 +2277,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct c4iw_ep *child_ep = NULL, *parent_ep;
 	struct cpl_pass_accept_req *req = cplhdr(skb);
-	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
+	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int hwtid = GET_TID(req);
 	struct dst_entry *dst;
@@ -2310,14 +2310,14 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		     ntohs(peer_port), peer_mss);
 		dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
 				 local_port, peer_port,
-				 GET_POPEN_TOS(ntohl(req->tos_stid)));
+				 PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
 	} else {
 		PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
 		     , __func__, parent_ep, hwtid,
 		     local_ip, peer_ip, ntohs(local_port),
 		     ntohs(peer_port), peer_mss);
 		dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
-				  PASS_OPEN_TOS(ntohl(req->tos_stid)),
+				  PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
 				  ((struct sockaddr_in6 *)
 				  &parent_ep->com.local_addr)->sin6_scope_id);
 	}
@@ -2375,7 +2375,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	}
 	c4iw_get_ep(&parent_ep->com);
 	child_ep->parent_ep = parent_ep;
-	child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
+	child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
 	child_ep->dst = dst;
 	child_ep->hwtid = hwtid;
 
@@ -3500,24 +3500,24 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
 
 	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
 	memset(req, 0, sizeof(*req));
-	req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
-				  V_SYN_MAC_IDX(G_RX_MACIDX(
+	req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
+				  SYN_MAC_IDX_V(RX_MACIDX_G(
 				  (__force int) htonl(l2info))) |
-				  F_SYN_XACT_MATCH);
+				  SYN_XACT_MATCH_F);
 	eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
-		      G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
-		      G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
-	req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
+		      RX_ETHHDR_LEN_G((__force int)htonl(l2info)) :
+		      RX_T5_ETHHDR_LEN_G((__force int)htonl(l2info));
+	req->hdr_len = cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(
 				   (__force int) htonl(l2info))) |
-				   V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
+				   TCP_HDR_LEN_V(RX_TCPHDR_LEN_G(
 				   (__force int) htons(hdr_len))) |
-				   V_IP_HDR_LEN(G_RX_IPHDR_LEN(
+				   IP_HDR_LEN_V(RX_IPHDR_LEN_G(
 				   (__force int) htons(hdr_len))) |
-				   V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
+				   ETH_HDR_LEN_V(RX_ETHHDR_LEN_G(eth_hdr_len)));
 	req->vlan = (__force __be16) vlantag;
 	req->len = (__force __be16) len;
-	req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
-				    PASS_OPEN_TOS(tos));
+	req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
+				    PASS_OPEN_TOS_V(tos));
 	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
 	if (tmp_opt.wscale_ok)
 		req->tcpopt.wsf = tmp_opt.snd_wscale;
@@ -3542,7 +3542,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
 	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
 	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
 	memset(req, 0, sizeof(*req));
-	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
+	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
 	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
 	req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
 	req->le.filter = (__force __be32) filter;
@@ -3556,7 +3556,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
 		htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
 		      FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
 		      FW_OFLD_CONNECTION_WR_ASTID_V(
-		      GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
+		      PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));
 
 	/*
 	 * We store the qid in opt2 which will be used by the firmware
@@ -3613,7 +3613,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct neighbour *neigh;
 
 	/* Drop all non-SYN packets */
-	if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
+	if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
 		goto reject;
 
 	/*
@@ -3635,8 +3635,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	}
 
 	eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
-		      G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
-		      G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
+		      RX_ETHHDR_LEN_G(htonl(cpl->l2info)) :
+		      RX_T5_ETHHDR_LEN_G(htonl(cpl->l2info));
 	if (eth_hdr_len == ETH_HLEN) {
 		eh = (struct ethhdr *)(req + 1);
 		iph = (struct iphdr *)(eh + 1);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index e9fd3a029296..ab7692ac2044 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -52,7 +52,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	memset(res_wr, 0, wr_len);
 	res_wr->op_nres = cpu_to_be32(
 			FW_WR_OP_V(FW_RI_RES_WR) |
-			V_FW_RI_RES_WR_NRES(1) |
+			FW_RI_RES_WR_NRES_V(1) |
 			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
 	res_wr->cookie = (unsigned long) &wr_wait;
@@ -122,7 +122,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	memset(res_wr, 0, wr_len);
 	res_wr->op_nres = cpu_to_be32(
 			FW_WR_OP_V(FW_RI_RES_WR) |
-			V_FW_RI_RES_WR_NRES(1) |
+			FW_RI_RES_WR_NRES_V(1) |
 			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
 	res_wr->cookie = (unsigned long) &wr_wait;
@@ -131,17 +131,17 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	res->u.cq.op = FW_RI_RES_OP_WRITE;
 	res->u.cq.iqid = cpu_to_be32(cq->cqid);
 	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
-			V_FW_RI_RES_WR_IQANUS(0) |
-			V_FW_RI_RES_WR_IQANUD(1) |
-			F_FW_RI_RES_WR_IQANDST |
-			V_FW_RI_RES_WR_IQANDSTINDEX(
+			FW_RI_RES_WR_IQANUS_V(0) |
+			FW_RI_RES_WR_IQANUD_V(1) |
+			FW_RI_RES_WR_IQANDST_F |
+			FW_RI_RES_WR_IQANDSTINDEX_V(
 				rdev->lldi.ciq_ids[cq->vector]));
 	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
-			F_FW_RI_RES_WR_IQDROPRSS |
-			V_FW_RI_RES_WR_IQPCIECH(2) |
-			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
-			F_FW_RI_RES_WR_IQO |
-			V_FW_RI_RES_WR_IQESIZE(1));
+			FW_RI_RES_WR_IQDROPRSS_F |
+			FW_RI_RES_WR_IQPCIECH_V(2) |
+			FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
+			FW_RI_RES_WR_IQO_F |
+			FW_RI_RES_WR_IQESIZE_V(1));
 	res->u.cq.iqsize = cpu_to_be16(cq->size);
 	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
 
@@ -182,12 +182,12 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
 	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
 	     wq, cq, cq->sw_cidx, cq->sw_pidx);
 	memset(&cqe, 0, sizeof(cqe));
-	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
-				 V_CQE_OPCODE(FW_RI_SEND) |
-				 V_CQE_TYPE(0) |
-				 V_CQE_SWCQE(1) |
-				 V_CQE_QPID(wq->sq.qid));
-	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+				 CQE_OPCODE_V(FW_RI_SEND) |
+				 CQE_TYPE_V(0) |
+				 CQE_SWCQE_V(1) |
+				 CQE_QPID_V(wq->sq.qid));
+	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
 	cq->sw_queue[cq->sw_pidx] = cqe;
 	t4_swcq_produce(cq);
 }
@@ -215,13 +215,13 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
 	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
 	     wq, cq, cq->sw_cidx, cq->sw_pidx);
 	memset(&cqe, 0, sizeof(cqe));
-	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
-				 V_CQE_OPCODE(swcqe->opcode) |
-				 V_CQE_TYPE(1) |
-				 V_CQE_SWCQE(1) |
-				 V_CQE_QPID(wq->sq.qid));
+	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+				 CQE_OPCODE_V(swcqe->opcode) |
+				 CQE_TYPE_V(1) |
+				 CQE_SWCQE_V(1) |
+				 CQE_QPID_V(wq->sq.qid));
 	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
-	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
 	cq->sw_queue[cq->sw_pidx] = cqe;
 	t4_swcq_produce(cq);
 }
@@ -284,7 +284,7 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
 		 */
 		PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
 		     __func__, cidx, cq->sw_pidx);
-		swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
+		swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
 		cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
 		t4_swcq_produce(cq);
 		swsqe->flushed = 1;
@@ -301,10 +301,10 @@ static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
 {
 	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
 	read_cqe->len = htonl(wq->sq.oldest_read->read_len);
-	read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
-				 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
-				 V_CQE_OPCODE(FW_RI_READ_REQ) |
-				 V_CQE_TYPE(1));
+	read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
+				 CQE_SWCQE_V(SW_CQE(hw_cqe)) |
+				 CQE_OPCODE_V(FW_RI_READ_REQ) |
+				 CQE_TYPE_V(1));
 	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
 }
 
@@ -400,7 +400,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 		} else {
 			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
 			*swcqe = *hw_cqe;
-			swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
+			swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
 			t4_swcq_produce(&chp->cq);
 		}
 next_cqe:
@@ -576,7 +576,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 		}
 		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
 			t4_set_wq_in_error(wq);
-			hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
+			hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
 			goto proc_cqe;
 		}
 		goto proc_cqe;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index eb5df4e62703..aafdbcd84fc4 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -380,12 +380,12 @@ static int dump_stag(int id, void *p, void *data)
380 "stag: idx 0x%x valid %d key 0x%x state %d pdid %d " 380 "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
381 "perm 0x%x ps %d len 0x%llx va 0x%llx\n", 381 "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
382 (u32)id<<8, 382 (u32)id<<8,
383 G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)), 383 FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
384 G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)), 384 FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
385 G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)), 385 FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
386 G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)), 386 FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
387 G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)), 387 FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
388 G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)), 388 FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
389 ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo), 389 ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
390 ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo)); 390 ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
391 if (cc < space) 391 if (cc < space)
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index c9df0549f51d..794555dc86a5 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -50,12 +50,12 @@ static void print_tpte(struct c4iw_dev *dev, u32 stag)
50 PDBG("stag idx 0x%x valid %d key 0x%x state %d pdid %d " 50 PDBG("stag idx 0x%x valid %d key 0x%x state %d pdid %d "
51 "perm 0x%x ps %d len 0x%llx va 0x%llx\n", 51 "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
52 stag & 0xffffff00, 52 stag & 0xffffff00,
53 G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)), 53 FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
54 G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)), 54 FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
55 G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)), 55 FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
56 G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)), 56 FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
57 G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)), 57 FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
58 G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)), 58 FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
59 ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo), 59 ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
60 ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo)); 60 ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
61} 61}
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index cb43c2299ac0..6791fd16272c 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -86,14 +86,14 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
 	req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
 	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
 	req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
-	req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
+	req->cmd |= cpu_to_be32(T5_ULP_MEMIO_ORDER_V(1));
 	req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
 	req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
 	req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
 
 	sgl = (struct ulptx_sgl *)(req + 1);
 	sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
-				    ULPTX_NSGE(1));
+				    ULPTX_NSGE_V(1));
 	sgl->len0 = cpu_to_be32(len);
 	sgl->addr0 = cpu_to_be64(data);
 
@@ -286,17 +286,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 	if (reset_tpt_entry)
 		memset(&tpt, 0, sizeof(tpt));
 	else {
-		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
-			V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
-			V_FW_RI_TPTE_STAGSTATE(stag_state) |
-			V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
-		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
-			(bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
-			V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
+		tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+			FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
+			FW_RI_TPTE_STAGSTATE_V(stag_state) |
+			FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
+		tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
+			(bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
+			FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
 						      FW_RI_VA_BASED_TO))|
-			V_FW_RI_TPTE_PS(page_size));
+			FW_RI_TPTE_PS_V(page_size));
 		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
-			V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
+			FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
 		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
 		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
 		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index bb85d479e66e..15cae5a31018 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -272,7 +272,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	memset(res_wr, 0, wr_len);
 	res_wr->op_nres = cpu_to_be32(
 			FW_WR_OP_V(FW_RI_RES_WR) |
-			V_FW_RI_RES_WR_NRES(2) |
+			FW_RI_RES_WR_NRES_V(2) |
 			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
 	res_wr->cookie = (unsigned long) &wr_wait;
@@ -287,19 +287,19 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		rdev->hw_queue.t4_eq_status_entries;
 
 	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
-		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
-		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
-		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
-		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
-		V_FW_RI_RES_WR_IQID(scq->cqid));
+		FW_RI_RES_WR_HOSTFCMODE_V(0) |	/* no host cidx updates */
+		FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
+		FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
+		(t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
+		FW_RI_RES_WR_IQID_V(scq->cqid));
 	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
-		V_FW_RI_RES_WR_DCAEN(0) |
-		V_FW_RI_RES_WR_DCACPU(0) |
-		V_FW_RI_RES_WR_FBMIN(2) |
-		V_FW_RI_RES_WR_FBMAX(2) |
-		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
-		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
-		V_FW_RI_RES_WR_EQSIZE(eqsize));
+		FW_RI_RES_WR_DCAEN_V(0) |
+		FW_RI_RES_WR_DCACPU_V(0) |
+		FW_RI_RES_WR_FBMIN_V(2) |
+		FW_RI_RES_WR_FBMAX_V(2) |
+		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+		FW_RI_RES_WR_EQSIZE_V(eqsize));
 	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
 	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
 	res++;
@@ -312,18 +312,18 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
 		rdev->hw_queue.t4_eq_status_entries;
 	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
-		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
-		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
-		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
-		V_FW_RI_RES_WR_IQID(rcq->cqid));
+		FW_RI_RES_WR_HOSTFCMODE_V(0) |	/* no host cidx updates */
+		FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
+		FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
+		FW_RI_RES_WR_IQID_V(rcq->cqid));
 	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
-		V_FW_RI_RES_WR_DCAEN(0) |
-		V_FW_RI_RES_WR_DCACPU(0) |
-		V_FW_RI_RES_WR_FBMIN(2) |
-		V_FW_RI_RES_WR_FBMAX(2) |
-		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
-		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
-		V_FW_RI_RES_WR_EQSIZE(eqsize));
+		FW_RI_RES_WR_DCAEN_V(0) |
+		FW_RI_RES_WR_DCACPU_V(0) |
+		FW_RI_RES_WR_FBMIN_V(2) |
+		FW_RI_RES_WR_FBMAX_V(2) |
+		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+		FW_RI_RES_WR_EQSIZE_V(eqsize));
 	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
 	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
 
@@ -444,19 +444,19 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
 	case IB_WR_SEND:
 		if (wr->send_flags & IB_SEND_SOLICITED)
 			wqe->send.sendop_pkd = cpu_to_be32(
-				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
+				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
 		else
 			wqe->send.sendop_pkd = cpu_to_be32(
-				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
+				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
 		wqe->send.stag_inv = 0;
 		break;
 	case IB_WR_SEND_WITH_INV:
 		if (wr->send_flags & IB_SEND_SOLICITED)
 			wqe->send.sendop_pkd = cpu_to_be32(
-				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
+				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
 		else
 			wqe->send.sendop_pkd = cpu_to_be32(
-				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
+				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
 		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
 		break;
 
@@ -1283,8 +1283,8 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 
 	wqe->u.init.type = FW_RI_TYPE_INIT;
 	wqe->u.init.mpareqbit_p2ptype =
-		V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
-		V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
+		FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
+		FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
 	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
 	if (qhp->attr.mpa_attr.recv_marker_enabled)
 		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
@@ -1776,7 +1776,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	if (mm5) {
 		mm5->key = uresp.ma_sync_key;
 		mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
-			    + A_PCIE_MA_SYNC) & PAGE_MASK;
+			    + PCIE_MA_SYNC_A) & PAGE_MASK;
 		mm5->len = PAGE_SIZE;
 		insert_mmap(ucontext, mm5);
 	}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index c04e5134b30c..871cdcac7be2 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -41,7 +41,7 @@
 #define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
 #define T4_STAG_UNSET 0xffffffff
 #define T4_FW_MAJ 0
-#define A_PCIE_MA_SYNC 0x30b4
+#define PCIE_MA_SYNC_A 0x30b4
 
 struct t4_status_page {
 	__be32 rsvd1;	/* flit 0 - hw owns */
@@ -184,44 +184,44 @@ struct t4_cqe {
 
 /* macros for flit 0 of the cqe */
 
-#define S_CQE_QPID	12
-#define M_CQE_QPID	0xFFFFF
-#define G_CQE_QPID(x)	((((x) >> S_CQE_QPID)) & M_CQE_QPID)
-#define V_CQE_QPID(x)	((x)<<S_CQE_QPID)
+#define CQE_QPID_S	12
+#define CQE_QPID_M	0xFFFFF
+#define CQE_QPID_G(x)	((((x) >> CQE_QPID_S)) & CQE_QPID_M)
+#define CQE_QPID_V(x)	((x)<<CQE_QPID_S)
 
-#define S_CQE_SWCQE	11
-#define M_CQE_SWCQE	0x1
-#define G_CQE_SWCQE(x)	((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
-#define V_CQE_SWCQE(x)	((x)<<S_CQE_SWCQE)
+#define CQE_SWCQE_S	11
+#define CQE_SWCQE_M	0x1
+#define CQE_SWCQE_G(x)	((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
+#define CQE_SWCQE_V(x)	((x)<<CQE_SWCQE_S)
 
-#define S_CQE_STATUS	5
-#define M_CQE_STATUS	0x1F
-#define G_CQE_STATUS(x)	((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
-#define V_CQE_STATUS(x)	((x)<<S_CQE_STATUS)
+#define CQE_STATUS_S	5
+#define CQE_STATUS_M	0x1F
+#define CQE_STATUS_G(x)	((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
+#define CQE_STATUS_V(x)	((x)<<CQE_STATUS_S)
 
-#define S_CQE_TYPE	4
-#define M_CQE_TYPE	0x1
-#define G_CQE_TYPE(x)	((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
-#define V_CQE_TYPE(x)	((x)<<S_CQE_TYPE)
+#define CQE_TYPE_S	4
+#define CQE_TYPE_M	0x1
+#define CQE_TYPE_G(x)	((((x) >> CQE_TYPE_S)) & CQE_TYPE_M)
+#define CQE_TYPE_V(x)	((x)<<CQE_TYPE_S)
 
-#define S_CQE_OPCODE	0
-#define M_CQE_OPCODE	0xF
-#define G_CQE_OPCODE(x)	((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
-#define V_CQE_OPCODE(x)	((x)<<S_CQE_OPCODE)
+#define CQE_OPCODE_S	0
+#define CQE_OPCODE_M	0xF
+#define CQE_OPCODE_G(x)	((((x) >> CQE_OPCODE_S)) & CQE_OPCODE_M)
+#define CQE_OPCODE_V(x)	((x)<<CQE_OPCODE_S)
 
-#define SW_CQE(x)	(G_CQE_SWCQE(be32_to_cpu((x)->header)))
-#define CQE_QPID(x)	(G_CQE_QPID(be32_to_cpu((x)->header)))
-#define CQE_TYPE(x)	(G_CQE_TYPE(be32_to_cpu((x)->header)))
+#define SW_CQE(x)	(CQE_SWCQE_G(be32_to_cpu((x)->header)))
+#define CQE_QPID(x)	(CQE_QPID_G(be32_to_cpu((x)->header)))
+#define CQE_TYPE(x)	(CQE_TYPE_G(be32_to_cpu((x)->header)))
 #define SQ_TYPE(x)	(CQE_TYPE((x)))
 #define RQ_TYPE(x)	(!CQE_TYPE((x)))
-#define CQE_STATUS(x)	(G_CQE_STATUS(be32_to_cpu((x)->header)))
-#define CQE_OPCODE(x)	(G_CQE_OPCODE(be32_to_cpu((x)->header)))
+#define CQE_STATUS(x)	(CQE_STATUS_G(be32_to_cpu((x)->header)))
+#define CQE_OPCODE(x)	(CQE_OPCODE_G(be32_to_cpu((x)->header)))
 
 #define CQE_SEND_OPCODE(x)( \
-	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
-	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
-	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
-	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
+	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
+	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
+	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
+	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
 
 #define CQE_LEN(x)	(be32_to_cpu((x)->len))
 
@@ -237,25 +237,25 @@ struct t4_cqe {
 #define CQE_WRID_LOW(x)	(be32_to_cpu((x)->u.gen.wrid_low))
 
 /* macros for flit 3 of the cqe */
-#define S_CQE_GENBIT	63
-#define M_CQE_GENBIT	0x1
-#define G_CQE_GENBIT(x)	(((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
-#define V_CQE_GENBIT(x)	((x)<<S_CQE_GENBIT)
+#define CQE_GENBIT_S	63
+#define CQE_GENBIT_M	0x1
+#define CQE_GENBIT_G(x)	(((x) >> CQE_GENBIT_S) & CQE_GENBIT_M)
+#define CQE_GENBIT_V(x)	((x)<<CQE_GENBIT_S)
 
-#define S_CQE_OVFBIT	62
-#define M_CQE_OVFBIT	0x1
-#define G_CQE_OVFBIT(x)	((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
+#define CQE_OVFBIT_S	62
+#define CQE_OVFBIT_M	0x1
+#define CQE_OVFBIT_G(x)	((((x) >> CQE_OVFBIT_S)) & CQE_OVFBIT_M)
 
-#define S_CQE_IQTYPE	60
-#define M_CQE_IQTYPE	0x3
-#define G_CQE_IQTYPE(x)	((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
+#define CQE_IQTYPE_S	60
+#define CQE_IQTYPE_M	0x3
+#define CQE_IQTYPE_G(x)	((((x) >> CQE_IQTYPE_S)) & CQE_IQTYPE_M)
 
-#define M_CQE_TS	0x0fffffffffffffffULL
-#define G_CQE_TS(x)	((x) & M_CQE_TS)
+#define CQE_TS_M	0x0fffffffffffffffULL
+#define CQE_TS_G(x)	((x) & CQE_TS_M)
 
-#define CQE_OVFBIT(x)	((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
-#define CQE_GENBIT(x)	((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
-#define CQE_TS(x)	(G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_OVFBIT(x)	((unsigned)CQE_OVFBIT_G(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_GENBIT(x)	((unsigned)CQE_GENBIT_G(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_TS(x)	(CQE_TS_G(be64_to_cpu((x)->bits_type_ts)))
 
 struct t4_swsqe {
 	u64 wr_id;
@@ -465,14 +465,14 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5,
 		} else {
 			PDBG("%s: DB wq->sq.pidx = %d\n",
 			     __func__, wq->sq.pidx);
-			writel(PIDX_T5(inc), wq->sq.udb);
+			writel(PIDX_T5_V(inc), wq->sq.udb);
 		}
 
 		/* Flush user doorbell area writes. */
 		wmb();
 		return;
 	}
-	writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
+	writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
 }
 
 static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
@@ -489,14 +489,14 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
 		} else {
 			PDBG("%s: DB wq->rq.pidx = %d\n",
 			     __func__, wq->rq.pidx);
-			writel(PIDX_T5(inc), wq->rq.udb);
+			writel(PIDX_T5_V(inc), wq->rq.udb);
 		}
 
 		/* Flush user doorbell area writes. */
 		wmb();
 		return;
 	}
-	writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
+	writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
 }
 
 static inline int t4_wq_in_error(struct t4_wq *wq)
@@ -561,14 +561,14 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
 	u32 val;
 
 	set_bit(CQ_ARMED, &cq->flags);
-	while (cq->cidx_inc > CIDXINC_MASK) {
-		val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
-		      INGRESSQID(cq->cqid);
+	while (cq->cidx_inc > CIDXINC_M) {
+		val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
+		      INGRESSQID_V(cq->cqid);
 		writel(val, cq->gts);
-		cq->cidx_inc -= CIDXINC_MASK;
+		cq->cidx_inc -= CIDXINC_M;
 	}
-	val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
-	      INGRESSQID(cq->cqid);
+	val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
+	      INGRESSQID_V(cq->cqid);
 	writel(val, cq->gts);
 	cq->cidx_inc = 0;
 	return 0;
@@ -597,11 +597,11 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
 static inline void t4_hwcq_consume(struct t4_cq *cq)
 {
 	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
-	if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) {
+	if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) {
 		u32 val;
 
-		val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
-		      INGRESSQID(cq->cqid);
+		val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
+		      INGRESSQID_V(cq->cqid);
 		writel(val, cq->gts);
 		cq->cidx_inc = 0;
 	}
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 5709e77faf7c..5e53327fc647 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -162,102 +162,102 @@ struct fw_ri_tpte {
162 __be32 len_hi; 162 __be32 len_hi;
163}; 163};
164 164
165#define S_FW_RI_TPTE_VALID 31 165#define FW_RI_TPTE_VALID_S 31
166#define M_FW_RI_TPTE_VALID 0x1 166#define FW_RI_TPTE_VALID_M 0x1
167#define V_FW_RI_TPTE_VALID(x) ((x) << S_FW_RI_TPTE_VALID) 167#define FW_RI_TPTE_VALID_V(x) ((x) << FW_RI_TPTE_VALID_S)
168#define G_FW_RI_TPTE_VALID(x) \ 168#define FW_RI_TPTE_VALID_G(x) \
169 (((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID) 169 (((x) >> FW_RI_TPTE_VALID_S) & FW_RI_TPTE_VALID_M)
170#define F_FW_RI_TPTE_VALID V_FW_RI_TPTE_VALID(1U) 170#define FW_RI_TPTE_VALID_F FW_RI_TPTE_VALID_V(1U)
171 171
172#define S_FW_RI_TPTE_STAGKEY 23 172#define FW_RI_TPTE_STAGKEY_S 23
173#define M_FW_RI_TPTE_STAGKEY 0xff 173#define FW_RI_TPTE_STAGKEY_M 0xff
174#define V_FW_RI_TPTE_STAGKEY(x) ((x) << S_FW_RI_TPTE_STAGKEY) 174#define FW_RI_TPTE_STAGKEY_V(x) ((x) << FW_RI_TPTE_STAGKEY_S)
175#define G_FW_RI_TPTE_STAGKEY(x) \ 175#define FW_RI_TPTE_STAGKEY_G(x) \
176 (((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY) 176 (((x) >> FW_RI_TPTE_STAGKEY_S) & FW_RI_TPTE_STAGKEY_M)
177 177
178#define S_FW_RI_TPTE_STAGSTATE 22 178#define FW_RI_TPTE_STAGSTATE_S 22
179#define M_FW_RI_TPTE_STAGSTATE 0x1 179#define FW_RI_TPTE_STAGSTATE_M 0x1
180#define V_FW_RI_TPTE_STAGSTATE(x) ((x) << S_FW_RI_TPTE_STAGSTATE) 180#define FW_RI_TPTE_STAGSTATE_V(x) ((x) << FW_RI_TPTE_STAGSTATE_S)
181#define G_FW_RI_TPTE_STAGSTATE(x) \ 181#define FW_RI_TPTE_STAGSTATE_G(x) \
182 (((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE) 182 (((x) >> FW_RI_TPTE_STAGSTATE_S) & FW_RI_TPTE_STAGSTATE_M)
183#define F_FW_RI_TPTE_STAGSTATE V_FW_RI_TPTE_STAGSTATE(1U) 183#define FW_RI_TPTE_STAGSTATE_F FW_RI_TPTE_STAGSTATE_V(1U)
184 184
185#define S_FW_RI_TPTE_STAGTYPE 20 185#define FW_RI_TPTE_STAGTYPE_S 20
186#define M_FW_RI_TPTE_STAGTYPE 0x3 186#define FW_RI_TPTE_STAGTYPE_M 0x3
187#define V_FW_RI_TPTE_STAGTYPE(x) ((x) << S_FW_RI_TPTE_STAGTYPE) 187#define FW_RI_TPTE_STAGTYPE_V(x) ((x) << FW_RI_TPTE_STAGTYPE_S)
188#define G_FW_RI_TPTE_STAGTYPE(x) \ 188#define FW_RI_TPTE_STAGTYPE_G(x) \
189 (((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE) 189 (((x) >> FW_RI_TPTE_STAGTYPE_S) & FW_RI_TPTE_STAGTYPE_M)
190 190
191#define S_FW_RI_TPTE_PDID 0 191#define FW_RI_TPTE_PDID_S 0
192#define M_FW_RI_TPTE_PDID 0xfffff 192#define FW_RI_TPTE_PDID_M 0xfffff
193#define V_FW_RI_TPTE_PDID(x) ((x) << S_FW_RI_TPTE_PDID) 193#define FW_RI_TPTE_PDID_V(x) ((x) << FW_RI_TPTE_PDID_S)
194#define G_FW_RI_TPTE_PDID(x) \ 194#define FW_RI_TPTE_PDID_G(x) \
195 (((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID) 195 (((x) >> FW_RI_TPTE_PDID_S) & FW_RI_TPTE_PDID_M)
196 196
197#define S_FW_RI_TPTE_PERM 28 197#define FW_RI_TPTE_PERM_S 28
198#define M_FW_RI_TPTE_PERM 0xf 198#define FW_RI_TPTE_PERM_M 0xf
199#define V_FW_RI_TPTE_PERM(x) ((x) << S_FW_RI_TPTE_PERM) 199#define FW_RI_TPTE_PERM_V(x) ((x) << FW_RI_TPTE_PERM_S)
200#define G_FW_RI_TPTE_PERM(x) \ 200#define FW_RI_TPTE_PERM_G(x) \
201 (((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM) 201 (((x) >> FW_RI_TPTE_PERM_S) & FW_RI_TPTE_PERM_M)
202 202
203#define S_FW_RI_TPTE_REMINVDIS 27 203#define FW_RI_TPTE_REMINVDIS_S 27
204#define M_FW_RI_TPTE_REMINVDIS 0x1 204#define FW_RI_TPTE_REMINVDIS_M 0x1
205#define V_FW_RI_TPTE_REMINVDIS(x) ((x) << S_FW_RI_TPTE_REMINVDIS) 205#define FW_RI_TPTE_REMINVDIS_V(x) ((x) << FW_RI_TPTE_REMINVDIS_S)
206#define G_FW_RI_TPTE_REMINVDIS(x) \ 206#define FW_RI_TPTE_REMINVDIS_G(x) \
207 (((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS) 207 (((x) >> FW_RI_TPTE_REMINVDIS_S) & FW_RI_TPTE_REMINVDIS_M)
208#define F_FW_RI_TPTE_REMINVDIS V_FW_RI_TPTE_REMINVDIS(1U) 208#define FW_RI_TPTE_REMINVDIS_F FW_RI_TPTE_REMINVDIS_V(1U)
209 209
210#define S_FW_RI_TPTE_ADDRTYPE 26 210#define FW_RI_TPTE_ADDRTYPE_S 26
211#define M_FW_RI_TPTE_ADDRTYPE 1 211#define FW_RI_TPTE_ADDRTYPE_M 1
212#define V_FW_RI_TPTE_ADDRTYPE(x) ((x) << S_FW_RI_TPTE_ADDRTYPE) 212#define FW_RI_TPTE_ADDRTYPE_V(x) ((x) << FW_RI_TPTE_ADDRTYPE_S)
213#define G_FW_RI_TPTE_ADDRTYPE(x) \ 213#define FW_RI_TPTE_ADDRTYPE_G(x) \
214 (((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE) 214 (((x) >> FW_RI_TPTE_ADDRTYPE_S) & FW_RI_TPTE_ADDRTYPE_M)
215#define F_FW_RI_TPTE_ADDRTYPE V_FW_RI_TPTE_ADDRTYPE(1U) 215#define FW_RI_TPTE_ADDRTYPE_F FW_RI_TPTE_ADDRTYPE_V(1U)
216 216
217#define S_FW_RI_TPTE_MWBINDEN 25 217#define FW_RI_TPTE_MWBINDEN_S 25
218#define M_FW_RI_TPTE_MWBINDEN 0x1 218#define FW_RI_TPTE_MWBINDEN_M 0x1
219#define V_FW_RI_TPTE_MWBINDEN(x) ((x) << S_FW_RI_TPTE_MWBINDEN) 219#define FW_RI_TPTE_MWBINDEN_V(x) ((x) << FW_RI_TPTE_MWBINDEN_S)
220#define G_FW_RI_TPTE_MWBINDEN(x) \ 220#define FW_RI_TPTE_MWBINDEN_G(x) \
221 (((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN) 221 (((x) >> FW_RI_TPTE_MWBINDEN_S) & FW_RI_TPTE_MWBINDEN_M)
222#define F_FW_RI_TPTE_MWBINDEN V_FW_RI_TPTE_MWBINDEN(1U) 222#define FW_RI_TPTE_MWBINDEN_F FW_RI_TPTE_MWBINDEN_V(1U)
223 223
224#define S_FW_RI_TPTE_PS 20 224#define FW_RI_TPTE_PS_S 20
225#define M_FW_RI_TPTE_PS 0x1f 225#define FW_RI_TPTE_PS_M 0x1f
226#define V_FW_RI_TPTE_PS(x) ((x) << S_FW_RI_TPTE_PS) 226#define FW_RI_TPTE_PS_V(x) ((x) << FW_RI_TPTE_PS_S)
227#define G_FW_RI_TPTE_PS(x) \ 227#define FW_RI_TPTE_PS_G(x) \
228 (((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS) 228 (((x) >> FW_RI_TPTE_PS_S) & FW_RI_TPTE_PS_M)
229 229
230#define S_FW_RI_TPTE_QPID 0 230#define FW_RI_TPTE_QPID_S 0
231#define M_FW_RI_TPTE_QPID 0xfffff 231#define FW_RI_TPTE_QPID_M 0xfffff
232#define V_FW_RI_TPTE_QPID(x) ((x) << S_FW_RI_TPTE_QPID) 232#define FW_RI_TPTE_QPID_V(x) ((x) << FW_RI_TPTE_QPID_S)
233#define G_FW_RI_TPTE_QPID(x) \ 233#define FW_RI_TPTE_QPID_G(x) \
234 (((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID) 234 (((x) >> FW_RI_TPTE_QPID_S) & FW_RI_TPTE_QPID_M)
235 235
236#define S_FW_RI_TPTE_NOSNOOP 30 236#define FW_RI_TPTE_NOSNOOP_S 30
237#define M_FW_RI_TPTE_NOSNOOP 0x1 237#define FW_RI_TPTE_NOSNOOP_M 0x1
238#define V_FW_RI_TPTE_NOSNOOP(x) ((x) << S_FW_RI_TPTE_NOSNOOP) 238#define FW_RI_TPTE_NOSNOOP_V(x) ((x) << FW_RI_TPTE_NOSNOOP_S)
239#define G_FW_RI_TPTE_NOSNOOP(x) \ 239#define FW_RI_TPTE_NOSNOOP_G(x) \
240 (((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP) 240 (((x) >> FW_RI_TPTE_NOSNOOP_S) & FW_RI_TPTE_NOSNOOP_M)
241#define F_FW_RI_TPTE_NOSNOOP V_FW_RI_TPTE_NOSNOOP(1U) 241#define FW_RI_TPTE_NOSNOOP_F FW_RI_TPTE_NOSNOOP_V(1U)
242 242
243#define S_FW_RI_TPTE_PBLADDR 0 243#define FW_RI_TPTE_PBLADDR_S 0
244#define M_FW_RI_TPTE_PBLADDR 0x1fffffff 244#define FW_RI_TPTE_PBLADDR_M 0x1fffffff
245#define V_FW_RI_TPTE_PBLADDR(x) ((x) << S_FW_RI_TPTE_PBLADDR) 245#define FW_RI_TPTE_PBLADDR_V(x) ((x) << FW_RI_TPTE_PBLADDR_S)
246#define G_FW_RI_TPTE_PBLADDR(x) \ 246#define FW_RI_TPTE_PBLADDR_G(x) \
247 (((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR) 247 (((x) >> FW_RI_TPTE_PBLADDR_S) & FW_RI_TPTE_PBLADDR_M)
248 248
249#define S_FW_RI_TPTE_DCA 24 249#define FW_RI_TPTE_DCA_S 24
250#define M_FW_RI_TPTE_DCA 0x1f 250#define FW_RI_TPTE_DCA_M 0x1f
251#define V_FW_RI_TPTE_DCA(x) ((x) << S_FW_RI_TPTE_DCA) 251#define FW_RI_TPTE_DCA_V(x) ((x) << FW_RI_TPTE_DCA_S)
252#define G_FW_RI_TPTE_DCA(x) \ 252#define FW_RI_TPTE_DCA_G(x) \
253 (((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA) 253 (((x) >> FW_RI_TPTE_DCA_S) & FW_RI_TPTE_DCA_M)
254 254
255#define S_FW_RI_TPTE_MWBCNT_PSTAG 0 255#define FW_RI_TPTE_MWBCNT_PSTAG_S 0
256#define M_FW_RI_TPTE_MWBCNT_PSTAG 0xffffff 256#define FW_RI_TPTE_MWBCNT_PSTAG_M 0xffffff
257#define V_FW_RI_TPTE_MWBCNT_PSTAT(x) \ 257#define FW_RI_TPTE_MWBCNT_PSTAT_V(x) \
258 ((x) << S_FW_RI_TPTE_MWBCNT_PSTAG) 258 ((x) << FW_RI_TPTE_MWBCNT_PSTAG_S)
259#define G_FW_RI_TPTE_MWBCNT_PSTAG(x) \ 259#define FW_RI_TPTE_MWBCNT_PSTAG_G(x) \
260 (((x) >> S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG) 260 (((x) >> FW_RI_TPTE_MWBCNT_PSTAG_S) & FW_RI_TPTE_MWBCNT_PSTAG_M)
261 261
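Every rename in this header is mechanical: the old S_ (shift), M_ (mask), V_ (value), G_ (get) and F_ (flag) prefixes become _S, _M, _V, _G and _F suffixes on the field name, with semantics unchanged. A minimal sketch of how the renamed accessors compose, using the FW_RI_TPTE_PS and FW_RI_TPTE_MWBINDEN fields defined above (tpte_word is a hypothetical host-order staging variable, not part of the patch):

	u32 tpte_word = 0;
	u32 ps;

	tpte_word |= FW_RI_TPTE_PS_V(0x9);	/* _V shifts a value into bits 24:20 */
	tpte_word |= FW_RI_TPTE_MWBINDEN_F;	/* _F is the one-bit form, == _V(1U) */
	ps = FW_RI_TPTE_PS_G(tpte_word);	/* _G shifts down and masks: yields 0x9 */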
262enum fw_ri_res_type { 262enum fw_ri_res_type {
263 FW_RI_RES_TYPE_SQ, 263 FW_RI_RES_TYPE_SQ,
@@ -308,222 +308,222 @@ struct fw_ri_res_wr {
308#endif 308#endif
309}; 309};
310 310
311#define S_FW_RI_RES_WR_NRES 0 311#define FW_RI_RES_WR_NRES_S 0
312#define M_FW_RI_RES_WR_NRES 0xff 312#define FW_RI_RES_WR_NRES_M 0xff
313#define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES) 313#define FW_RI_RES_WR_NRES_V(x) ((x) << FW_RI_RES_WR_NRES_S)
314#define G_FW_RI_RES_WR_NRES(x) \ 314#define FW_RI_RES_WR_NRES_G(x) \
315 (((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES) 315 (((x) >> FW_RI_RES_WR_NRES_S) & FW_RI_RES_WR_NRES_M)
316 316
317#define S_FW_RI_RES_WR_FETCHSZM 26 317#define FW_RI_RES_WR_FETCHSZM_S 26
318#define M_FW_RI_RES_WR_FETCHSZM 0x1 318#define FW_RI_RES_WR_FETCHSZM_M 0x1
319#define V_FW_RI_RES_WR_FETCHSZM(x) ((x) << S_FW_RI_RES_WR_FETCHSZM) 319#define FW_RI_RES_WR_FETCHSZM_V(x) ((x) << FW_RI_RES_WR_FETCHSZM_S)
320#define G_FW_RI_RES_WR_FETCHSZM(x) \ 320#define FW_RI_RES_WR_FETCHSZM_G(x) \
321 (((x) >> S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM) 321 (((x) >> FW_RI_RES_WR_FETCHSZM_S) & FW_RI_RES_WR_FETCHSZM_M)
322#define F_FW_RI_RES_WR_FETCHSZM V_FW_RI_RES_WR_FETCHSZM(1U) 322#define FW_RI_RES_WR_FETCHSZM_F FW_RI_RES_WR_FETCHSZM_V(1U)
323 323
324#define S_FW_RI_RES_WR_STATUSPGNS 25 324#define FW_RI_RES_WR_STATUSPGNS_S 25
325#define M_FW_RI_RES_WR_STATUSPGNS 0x1 325#define FW_RI_RES_WR_STATUSPGNS_M 0x1
326#define V_FW_RI_RES_WR_STATUSPGNS(x) ((x) << S_FW_RI_RES_WR_STATUSPGNS) 326#define FW_RI_RES_WR_STATUSPGNS_V(x) ((x) << FW_RI_RES_WR_STATUSPGNS_S)
327#define G_FW_RI_RES_WR_STATUSPGNS(x) \ 327#define FW_RI_RES_WR_STATUSPGNS_G(x) \
328 (((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS) 328 (((x) >> FW_RI_RES_WR_STATUSPGNS_S) & FW_RI_RES_WR_STATUSPGNS_M)
329#define F_FW_RI_RES_WR_STATUSPGNS V_FW_RI_RES_WR_STATUSPGNS(1U) 329#define FW_RI_RES_WR_STATUSPGNS_F FW_RI_RES_WR_STATUSPGNS_V(1U)
330 330
331#define S_FW_RI_RES_WR_STATUSPGRO 24 331#define FW_RI_RES_WR_STATUSPGRO_S 24
332#define M_FW_RI_RES_WR_STATUSPGRO 0x1 332#define FW_RI_RES_WR_STATUSPGRO_M 0x1
333#define V_FW_RI_RES_WR_STATUSPGRO(x) ((x) << S_FW_RI_RES_WR_STATUSPGRO) 333#define FW_RI_RES_WR_STATUSPGRO_V(x) ((x) << FW_RI_RES_WR_STATUSPGRO_S)
334#define G_FW_RI_RES_WR_STATUSPGRO(x) \ 334#define FW_RI_RES_WR_STATUSPGRO_G(x) \
335 (((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO) 335 (((x) >> FW_RI_RES_WR_STATUSPGRO_S) & FW_RI_RES_WR_STATUSPGRO_M)
336#define F_FW_RI_RES_WR_STATUSPGRO V_FW_RI_RES_WR_STATUSPGRO(1U) 336#define FW_RI_RES_WR_STATUSPGRO_F FW_RI_RES_WR_STATUSPGRO_V(1U)
337 337
338#define S_FW_RI_RES_WR_FETCHNS 23 338#define FW_RI_RES_WR_FETCHNS_S 23
339#define M_FW_RI_RES_WR_FETCHNS 0x1 339#define FW_RI_RES_WR_FETCHNS_M 0x1
340#define V_FW_RI_RES_WR_FETCHNS(x) ((x) << S_FW_RI_RES_WR_FETCHNS) 340#define FW_RI_RES_WR_FETCHNS_V(x) ((x) << FW_RI_RES_WR_FETCHNS_S)
341#define G_FW_RI_RES_WR_FETCHNS(x) \ 341#define FW_RI_RES_WR_FETCHNS_G(x) \
342 (((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS) 342 (((x) >> FW_RI_RES_WR_FETCHNS_S) & FW_RI_RES_WR_FETCHNS_M)
343#define F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U) 343#define FW_RI_RES_WR_FETCHNS_F FW_RI_RES_WR_FETCHNS_V(1U)
344 344
345#define S_FW_RI_RES_WR_FETCHRO 22 345#define FW_RI_RES_WR_FETCHRO_S 22
346#define M_FW_RI_RES_WR_FETCHRO 0x1 346#define FW_RI_RES_WR_FETCHRO_M 0x1
347#define V_FW_RI_RES_WR_FETCHRO(x) ((x) << S_FW_RI_RES_WR_FETCHRO) 347#define FW_RI_RES_WR_FETCHRO_V(x) ((x) << FW_RI_RES_WR_FETCHRO_S)
348#define G_FW_RI_RES_WR_FETCHRO(x) \ 348#define FW_RI_RES_WR_FETCHRO_G(x) \
349 (((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO) 349 (((x) >> FW_RI_RES_WR_FETCHRO_S) & FW_RI_RES_WR_FETCHRO_M)
350#define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U) 350#define FW_RI_RES_WR_FETCHRO_F FW_RI_RES_WR_FETCHRO_V(1U)
351 351
352#define S_FW_RI_RES_WR_HOSTFCMODE 20 352#define FW_RI_RES_WR_HOSTFCMODE_S 20
353#define M_FW_RI_RES_WR_HOSTFCMODE 0x3 353#define FW_RI_RES_WR_HOSTFCMODE_M 0x3
354#define V_FW_RI_RES_WR_HOSTFCMODE(x) ((x) << S_FW_RI_RES_WR_HOSTFCMODE) 354#define FW_RI_RES_WR_HOSTFCMODE_V(x) ((x) << FW_RI_RES_WR_HOSTFCMODE_S)
355#define G_FW_RI_RES_WR_HOSTFCMODE(x) \ 355#define FW_RI_RES_WR_HOSTFCMODE_G(x) \
356 (((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE) 356 (((x) >> FW_RI_RES_WR_HOSTFCMODE_S) & FW_RI_RES_WR_HOSTFCMODE_M)
357 357
358#define S_FW_RI_RES_WR_CPRIO 19 358#define FW_RI_RES_WR_CPRIO_S 19
359#define M_FW_RI_RES_WR_CPRIO 0x1 359#define FW_RI_RES_WR_CPRIO_M 0x1
360#define V_FW_RI_RES_WR_CPRIO(x) ((x) << S_FW_RI_RES_WR_CPRIO) 360#define FW_RI_RES_WR_CPRIO_V(x) ((x) << FW_RI_RES_WR_CPRIO_S)
361#define G_FW_RI_RES_WR_CPRIO(x) \ 361#define FW_RI_RES_WR_CPRIO_G(x) \
362 (((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO) 362 (((x) >> FW_RI_RES_WR_CPRIO_S) & FW_RI_RES_WR_CPRIO_M)
363#define F_FW_RI_RES_WR_CPRIO V_FW_RI_RES_WR_CPRIO(1U) 363#define FW_RI_RES_WR_CPRIO_F FW_RI_RES_WR_CPRIO_V(1U)
364 364
365#define S_FW_RI_RES_WR_ONCHIP 18 365#define FW_RI_RES_WR_ONCHIP_S 18
366#define M_FW_RI_RES_WR_ONCHIP 0x1 366#define FW_RI_RES_WR_ONCHIP_M 0x1
367#define V_FW_RI_RES_WR_ONCHIP(x) ((x) << S_FW_RI_RES_WR_ONCHIP) 367#define FW_RI_RES_WR_ONCHIP_V(x) ((x) << FW_RI_RES_WR_ONCHIP_S)
368#define G_FW_RI_RES_WR_ONCHIP(x) \ 368#define FW_RI_RES_WR_ONCHIP_G(x) \
369 (((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP) 369 (((x) >> FW_RI_RES_WR_ONCHIP_S) & FW_RI_RES_WR_ONCHIP_M)
370#define F_FW_RI_RES_WR_ONCHIP V_FW_RI_RES_WR_ONCHIP(1U) 370#define FW_RI_RES_WR_ONCHIP_F FW_RI_RES_WR_ONCHIP_V(1U)
371 371
372#define S_FW_RI_RES_WR_PCIECHN 16 372#define FW_RI_RES_WR_PCIECHN_S 16
373#define M_FW_RI_RES_WR_PCIECHN 0x3 373#define FW_RI_RES_WR_PCIECHN_M 0x3
374#define V_FW_RI_RES_WR_PCIECHN(x) ((x) << S_FW_RI_RES_WR_PCIECHN) 374#define FW_RI_RES_WR_PCIECHN_V(x) ((x) << FW_RI_RES_WR_PCIECHN_S)
375#define G_FW_RI_RES_WR_PCIECHN(x) \ 375#define FW_RI_RES_WR_PCIECHN_G(x) \
376 (((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN) 376 (((x) >> FW_RI_RES_WR_PCIECHN_S) & FW_RI_RES_WR_PCIECHN_M)
377 377
378#define S_FW_RI_RES_WR_IQID 0 378#define FW_RI_RES_WR_IQID_S 0
379#define M_FW_RI_RES_WR_IQID 0xffff 379#define FW_RI_RES_WR_IQID_M 0xffff
380#define V_FW_RI_RES_WR_IQID(x) ((x) << S_FW_RI_RES_WR_IQID) 380#define FW_RI_RES_WR_IQID_V(x) ((x) << FW_RI_RES_WR_IQID_S)
381#define G_FW_RI_RES_WR_IQID(x) \ 381#define FW_RI_RES_WR_IQID_G(x) \
382 (((x) >> S_FW_RI_RES_WR_IQID) & M_FW_RI_RES_WR_IQID) 382 (((x) >> FW_RI_RES_WR_IQID_S) & FW_RI_RES_WR_IQID_M)
383 383
384#define S_FW_RI_RES_WR_DCAEN 31 384#define FW_RI_RES_WR_DCAEN_S 31
385#define M_FW_RI_RES_WR_DCAEN 0x1 385#define FW_RI_RES_WR_DCAEN_M 0x1
386#define V_FW_RI_RES_WR_DCAEN(x) ((x) << S_FW_RI_RES_WR_DCAEN) 386#define FW_RI_RES_WR_DCAEN_V(x) ((x) << FW_RI_RES_WR_DCAEN_S)
387#define G_FW_RI_RES_WR_DCAEN(x) \ 387#define FW_RI_RES_WR_DCAEN_G(x) \
388 (((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN) 388 (((x) >> FW_RI_RES_WR_DCAEN_S) & FW_RI_RES_WR_DCAEN_M)
389#define F_FW_RI_RES_WR_DCAEN V_FW_RI_RES_WR_DCAEN(1U) 389#define FW_RI_RES_WR_DCAEN_F FW_RI_RES_WR_DCAEN_V(1U)
390 390
391#define S_FW_RI_RES_WR_DCACPU 26 391#define FW_RI_RES_WR_DCACPU_S 26
392#define M_FW_RI_RES_WR_DCACPU 0x1f 392#define FW_RI_RES_WR_DCACPU_M 0x1f
393#define V_FW_RI_RES_WR_DCACPU(x) ((x) << S_FW_RI_RES_WR_DCACPU) 393#define FW_RI_RES_WR_DCACPU_V(x) ((x) << FW_RI_RES_WR_DCACPU_S)
394#define G_FW_RI_RES_WR_DCACPU(x) \ 394#define FW_RI_RES_WR_DCACPU_G(x) \
395 (((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU) 395 (((x) >> FW_RI_RES_WR_DCACPU_S) & FW_RI_RES_WR_DCACPU_M)
396 396
397#define S_FW_RI_RES_WR_FBMIN 23 397#define FW_RI_RES_WR_FBMIN_S 23
398#define M_FW_RI_RES_WR_FBMIN 0x7 398#define FW_RI_RES_WR_FBMIN_M 0x7
399#define V_FW_RI_RES_WR_FBMIN(x) ((x) << S_FW_RI_RES_WR_FBMIN) 399#define FW_RI_RES_WR_FBMIN_V(x) ((x) << FW_RI_RES_WR_FBMIN_S)
400#define G_FW_RI_RES_WR_FBMIN(x) \ 400#define FW_RI_RES_WR_FBMIN_G(x) \
401 (((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN) 401 (((x) >> FW_RI_RES_WR_FBMIN_S) & FW_RI_RES_WR_FBMIN_M)
402 402
403#define S_FW_RI_RES_WR_FBMAX 20 403#define FW_RI_RES_WR_FBMAX_S 20
404#define M_FW_RI_RES_WR_FBMAX 0x7 404#define FW_RI_RES_WR_FBMAX_M 0x7
405#define V_FW_RI_RES_WR_FBMAX(x) ((x) << S_FW_RI_RES_WR_FBMAX) 405#define FW_RI_RES_WR_FBMAX_V(x) ((x) << FW_RI_RES_WR_FBMAX_S)
406#define G_FW_RI_RES_WR_FBMAX(x) \ 406#define FW_RI_RES_WR_FBMAX_G(x) \
407 (((x) >> S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX) 407 (((x) >> FW_RI_RES_WR_FBMAX_S) & FW_RI_RES_WR_FBMAX_M)
408 408
409#define S_FW_RI_RES_WR_CIDXFTHRESHO 19 409#define FW_RI_RES_WR_CIDXFTHRESHO_S 19
410#define M_FW_RI_RES_WR_CIDXFTHRESHO 0x1 410#define FW_RI_RES_WR_CIDXFTHRESHO_M 0x1
411#define V_FW_RI_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESHO) 411#define FW_RI_RES_WR_CIDXFTHRESHO_V(x) ((x) << FW_RI_RES_WR_CIDXFTHRESHO_S)
412#define G_FW_RI_RES_WR_CIDXFTHRESHO(x) \ 412#define FW_RI_RES_WR_CIDXFTHRESHO_G(x) \
413 (((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO) 413 (((x) >> FW_RI_RES_WR_CIDXFTHRESHO_S) & FW_RI_RES_WR_CIDXFTHRESHO_M)
414#define F_FW_RI_RES_WR_CIDXFTHRESHO V_FW_RI_RES_WR_CIDXFTHRESHO(1U) 414#define FW_RI_RES_WR_CIDXFTHRESHO_F FW_RI_RES_WR_CIDXFTHRESHO_V(1U)
415 415
416#define S_FW_RI_RES_WR_CIDXFTHRESH 16 416#define FW_RI_RES_WR_CIDXFTHRESH_S 16
417#define M_FW_RI_RES_WR_CIDXFTHRESH 0x7 417#define FW_RI_RES_WR_CIDXFTHRESH_M 0x7
418#define V_FW_RI_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESH) 418#define FW_RI_RES_WR_CIDXFTHRESH_V(x) ((x) << FW_RI_RES_WR_CIDXFTHRESH_S)
419#define G_FW_RI_RES_WR_CIDXFTHRESH(x) \ 419#define FW_RI_RES_WR_CIDXFTHRESH_G(x) \
420 (((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH) 420 (((x) >> FW_RI_RES_WR_CIDXFTHRESH_S) & FW_RI_RES_WR_CIDXFTHRESH_M)
421 421
422#define S_FW_RI_RES_WR_EQSIZE 0 422#define FW_RI_RES_WR_EQSIZE_S 0
423#define M_FW_RI_RES_WR_EQSIZE 0xffff 423#define FW_RI_RES_WR_EQSIZE_M 0xffff
424#define V_FW_RI_RES_WR_EQSIZE(x) ((x) << S_FW_RI_RES_WR_EQSIZE) 424#define FW_RI_RES_WR_EQSIZE_V(x) ((x) << FW_RI_RES_WR_EQSIZE_S)
425#define G_FW_RI_RES_WR_EQSIZE(x) \ 425#define FW_RI_RES_WR_EQSIZE_G(x) \
426 (((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE) 426 (((x) >> FW_RI_RES_WR_EQSIZE_S) & FW_RI_RES_WR_EQSIZE_M)
427 427
428#define S_FW_RI_RES_WR_IQANDST 15 428#define FW_RI_RES_WR_IQANDST_S 15
429#define M_FW_RI_RES_WR_IQANDST 0x1 429#define FW_RI_RES_WR_IQANDST_M 0x1
430#define V_FW_RI_RES_WR_IQANDST(x) ((x) << S_FW_RI_RES_WR_IQANDST) 430#define FW_RI_RES_WR_IQANDST_V(x) ((x) << FW_RI_RES_WR_IQANDST_S)
431#define G_FW_RI_RES_WR_IQANDST(x) \ 431#define FW_RI_RES_WR_IQANDST_G(x) \
432 (((x) >> S_FW_RI_RES_WR_IQANDST) & M_FW_RI_RES_WR_IQANDST) 432 (((x) >> FW_RI_RES_WR_IQANDST_S) & FW_RI_RES_WR_IQANDST_M)
433#define F_FW_RI_RES_WR_IQANDST V_FW_RI_RES_WR_IQANDST(1U) 433#define FW_RI_RES_WR_IQANDST_F FW_RI_RES_WR_IQANDST_V(1U)
434 434
435#define S_FW_RI_RES_WR_IQANUS 14 435#define FW_RI_RES_WR_IQANUS_S 14
436#define M_FW_RI_RES_WR_IQANUS 0x1 436#define FW_RI_RES_WR_IQANUS_M 0x1
437#define V_FW_RI_RES_WR_IQANUS(x) ((x) << S_FW_RI_RES_WR_IQANUS) 437#define FW_RI_RES_WR_IQANUS_V(x) ((x) << FW_RI_RES_WR_IQANUS_S)
438#define G_FW_RI_RES_WR_IQANUS(x) \ 438#define FW_RI_RES_WR_IQANUS_G(x) \
439 (((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS) 439 (((x) >> FW_RI_RES_WR_IQANUS_S) & FW_RI_RES_WR_IQANUS_M)
440#define F_FW_RI_RES_WR_IQANUS V_FW_RI_RES_WR_IQANUS(1U) 440#define FW_RI_RES_WR_IQANUS_F FW_RI_RES_WR_IQANUS_V(1U)
441 441
442#define S_FW_RI_RES_WR_IQANUD 12 442#define FW_RI_RES_WR_IQANUD_S 12
443#define M_FW_RI_RES_WR_IQANUD 0x3 443#define FW_RI_RES_WR_IQANUD_M 0x3
444#define V_FW_RI_RES_WR_IQANUD(x) ((x) << S_FW_RI_RES_WR_IQANUD) 444#define FW_RI_RES_WR_IQANUD_V(x) ((x) << FW_RI_RES_WR_IQANUD_S)
445#define G_FW_RI_RES_WR_IQANUD(x) \ 445#define FW_RI_RES_WR_IQANUD_G(x) \
446 (((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD) 446 (((x) >> FW_RI_RES_WR_IQANUD_S) & FW_RI_RES_WR_IQANUD_M)
447 447
448#define S_FW_RI_RES_WR_IQANDSTINDEX 0 448#define FW_RI_RES_WR_IQANDSTINDEX_S 0
449#define M_FW_RI_RES_WR_IQANDSTINDEX 0xfff 449#define FW_RI_RES_WR_IQANDSTINDEX_M 0xfff
450#define V_FW_RI_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_RI_RES_WR_IQANDSTINDEX) 450#define FW_RI_RES_WR_IQANDSTINDEX_V(x) ((x) << FW_RI_RES_WR_IQANDSTINDEX_S)
451#define G_FW_RI_RES_WR_IQANDSTINDEX(x) \ 451#define FW_RI_RES_WR_IQANDSTINDEX_G(x) \
452 (((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX) 452 (((x) >> FW_RI_RES_WR_IQANDSTINDEX_S) & FW_RI_RES_WR_IQANDSTINDEX_M)
453 453
454#define S_FW_RI_RES_WR_IQDROPRSS 15 454#define FW_RI_RES_WR_IQDROPRSS_S 15
455#define M_FW_RI_RES_WR_IQDROPRSS 0x1 455#define FW_RI_RES_WR_IQDROPRSS_M 0x1
456#define V_FW_RI_RES_WR_IQDROPRSS(x) ((x) << S_FW_RI_RES_WR_IQDROPRSS) 456#define FW_RI_RES_WR_IQDROPRSS_V(x) ((x) << FW_RI_RES_WR_IQDROPRSS_S)
457#define G_FW_RI_RES_WR_IQDROPRSS(x) \ 457#define FW_RI_RES_WR_IQDROPRSS_G(x) \
458 (((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS) 458 (((x) >> FW_RI_RES_WR_IQDROPRSS_S) & FW_RI_RES_WR_IQDROPRSS_M)
459#define F_FW_RI_RES_WR_IQDROPRSS V_FW_RI_RES_WR_IQDROPRSS(1U) 459#define FW_RI_RES_WR_IQDROPRSS_F FW_RI_RES_WR_IQDROPRSS_V(1U)
460 460
461#define S_FW_RI_RES_WR_IQGTSMODE 14 461#define FW_RI_RES_WR_IQGTSMODE_S 14
462#define M_FW_RI_RES_WR_IQGTSMODE 0x1 462#define FW_RI_RES_WR_IQGTSMODE_M 0x1
463#define V_FW_RI_RES_WR_IQGTSMODE(x) ((x) << S_FW_RI_RES_WR_IQGTSMODE) 463#define FW_RI_RES_WR_IQGTSMODE_V(x) ((x) << FW_RI_RES_WR_IQGTSMODE_S)
464#define G_FW_RI_RES_WR_IQGTSMODE(x) \ 464#define FW_RI_RES_WR_IQGTSMODE_G(x) \
465 (((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE) 465 (((x) >> FW_RI_RES_WR_IQGTSMODE_S) & FW_RI_RES_WR_IQGTSMODE_M)
466#define F_FW_RI_RES_WR_IQGTSMODE V_FW_RI_RES_WR_IQGTSMODE(1U) 466#define FW_RI_RES_WR_IQGTSMODE_F FW_RI_RES_WR_IQGTSMODE_V(1U)
467 467
468#define S_FW_RI_RES_WR_IQPCIECH 12 468#define FW_RI_RES_WR_IQPCIECH_S 12
469#define M_FW_RI_RES_WR_IQPCIECH 0x3 469#define FW_RI_RES_WR_IQPCIECH_M 0x3
470#define V_FW_RI_RES_WR_IQPCIECH(x) ((x) << S_FW_RI_RES_WR_IQPCIECH) 470#define FW_RI_RES_WR_IQPCIECH_V(x) ((x) << FW_RI_RES_WR_IQPCIECH_S)
471#define G_FW_RI_RES_WR_IQPCIECH(x) \ 471#define FW_RI_RES_WR_IQPCIECH_G(x) \
472 (((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH) 472 (((x) >> FW_RI_RES_WR_IQPCIECH_S) & FW_RI_RES_WR_IQPCIECH_M)
473 473
474#define S_FW_RI_RES_WR_IQDCAEN 11 474#define FW_RI_RES_WR_IQDCAEN_S 11
475#define M_FW_RI_RES_WR_IQDCAEN 0x1 475#define FW_RI_RES_WR_IQDCAEN_M 0x1
476#define V_FW_RI_RES_WR_IQDCAEN(x) ((x) << S_FW_RI_RES_WR_IQDCAEN) 476#define FW_RI_RES_WR_IQDCAEN_V(x) ((x) << FW_RI_RES_WR_IQDCAEN_S)
477#define G_FW_RI_RES_WR_IQDCAEN(x) \ 477#define FW_RI_RES_WR_IQDCAEN_G(x) \
478 (((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN) 478 (((x) >> FW_RI_RES_WR_IQDCAEN_S) & FW_RI_RES_WR_IQDCAEN_M)
479#define F_FW_RI_RES_WR_IQDCAEN V_FW_RI_RES_WR_IQDCAEN(1U) 479#define FW_RI_RES_WR_IQDCAEN_F FW_RI_RES_WR_IQDCAEN_V(1U)
480 480
481#define S_FW_RI_RES_WR_IQDCACPU 6 481#define FW_RI_RES_WR_IQDCACPU_S 6
482#define M_FW_RI_RES_WR_IQDCACPU 0x1f 482#define FW_RI_RES_WR_IQDCACPU_M 0x1f
483#define V_FW_RI_RES_WR_IQDCACPU(x) ((x) << S_FW_RI_RES_WR_IQDCACPU) 483#define FW_RI_RES_WR_IQDCACPU_V(x) ((x) << FW_RI_RES_WR_IQDCACPU_S)
484#define G_FW_RI_RES_WR_IQDCACPU(x) \ 484#define FW_RI_RES_WR_IQDCACPU_G(x) \
485 (((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU) 485 (((x) >> FW_RI_RES_WR_IQDCACPU_S) & FW_RI_RES_WR_IQDCACPU_M)
486 486
487#define S_FW_RI_RES_WR_IQINTCNTTHRESH 4 487#define FW_RI_RES_WR_IQINTCNTTHRESH_S 4
488#define M_FW_RI_RES_WR_IQINTCNTTHRESH 0x3 488#define FW_RI_RES_WR_IQINTCNTTHRESH_M 0x3
489#define V_FW_RI_RES_WR_IQINTCNTTHRESH(x) \ 489#define FW_RI_RES_WR_IQINTCNTTHRESH_V(x) \
490 ((x) << S_FW_RI_RES_WR_IQINTCNTTHRESH) 490 ((x) << FW_RI_RES_WR_IQINTCNTTHRESH_S)
491#define G_FW_RI_RES_WR_IQINTCNTTHRESH(x) \ 491#define FW_RI_RES_WR_IQINTCNTTHRESH_G(x) \
492 (((x) >> S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH) 492 (((x) >> FW_RI_RES_WR_IQINTCNTTHRESH_S) & FW_RI_RES_WR_IQINTCNTTHRESH_M)
493 493
494#define S_FW_RI_RES_WR_IQO 3 494#define FW_RI_RES_WR_IQO_S 3
495#define M_FW_RI_RES_WR_IQO 0x1 495#define FW_RI_RES_WR_IQO_M 0x1
496#define V_FW_RI_RES_WR_IQO(x) ((x) << S_FW_RI_RES_WR_IQO) 496#define FW_RI_RES_WR_IQO_V(x) ((x) << FW_RI_RES_WR_IQO_S)
497#define G_FW_RI_RES_WR_IQO(x) \ 497#define FW_RI_RES_WR_IQO_G(x) \
498 (((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO) 498 (((x) >> FW_RI_RES_WR_IQO_S) & FW_RI_RES_WR_IQO_M)
499#define F_FW_RI_RES_WR_IQO V_FW_RI_RES_WR_IQO(1U) 499#define FW_RI_RES_WR_IQO_F FW_RI_RES_WR_IQO_V(1U)
500 500
501#define S_FW_RI_RES_WR_IQCPRIO 2 501#define FW_RI_RES_WR_IQCPRIO_S 2
502#define M_FW_RI_RES_WR_IQCPRIO 0x1 502#define FW_RI_RES_WR_IQCPRIO_M 0x1
503#define V_FW_RI_RES_WR_IQCPRIO(x) ((x) << S_FW_RI_RES_WR_IQCPRIO) 503#define FW_RI_RES_WR_IQCPRIO_V(x) ((x) << FW_RI_RES_WR_IQCPRIO_S)
504#define G_FW_RI_RES_WR_IQCPRIO(x) \ 504#define FW_RI_RES_WR_IQCPRIO_G(x) \
505 (((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO) 505 (((x) >> FW_RI_RES_WR_IQCPRIO_S) & FW_RI_RES_WR_IQCPRIO_M)
506#define F_FW_RI_RES_WR_IQCPRIO V_FW_RI_RES_WR_IQCPRIO(1U) 506#define FW_RI_RES_WR_IQCPRIO_F FW_RI_RES_WR_IQCPRIO_V(1U)
507 507
508#define S_FW_RI_RES_WR_IQESIZE 0 508#define FW_RI_RES_WR_IQESIZE_S 0
509#define M_FW_RI_RES_WR_IQESIZE 0x3 509#define FW_RI_RES_WR_IQESIZE_M 0x3
510#define V_FW_RI_RES_WR_IQESIZE(x) ((x) << S_FW_RI_RES_WR_IQESIZE) 510#define FW_RI_RES_WR_IQESIZE_V(x) ((x) << FW_RI_RES_WR_IQESIZE_S)
511#define G_FW_RI_RES_WR_IQESIZE(x) \ 511#define FW_RI_RES_WR_IQESIZE_G(x) \
512 (((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE) 512 (((x) >> FW_RI_RES_WR_IQESIZE_S) & FW_RI_RES_WR_IQESIZE_M)
513 513
514#define S_FW_RI_RES_WR_IQNS 31 514#define FW_RI_RES_WR_IQNS_S 31
515#define M_FW_RI_RES_WR_IQNS 0x1 515#define FW_RI_RES_WR_IQNS_M 0x1
516#define V_FW_RI_RES_WR_IQNS(x) ((x) << S_FW_RI_RES_WR_IQNS) 516#define FW_RI_RES_WR_IQNS_V(x) ((x) << FW_RI_RES_WR_IQNS_S)
517#define G_FW_RI_RES_WR_IQNS(x) \ 517#define FW_RI_RES_WR_IQNS_G(x) \
518 (((x) >> S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS) 518 (((x) >> FW_RI_RES_WR_IQNS_S) & FW_RI_RES_WR_IQNS_M)
519#define F_FW_RI_RES_WR_IQNS V_FW_RI_RES_WR_IQNS(1U) 519#define FW_RI_RES_WR_IQNS_F FW_RI_RES_WR_IQNS_V(1U)
520 520
521#define S_FW_RI_RES_WR_IQRO 30 521#define FW_RI_RES_WR_IQRO_S 30
522#define M_FW_RI_RES_WR_IQRO 0x1 522#define FW_RI_RES_WR_IQRO_M 0x1
523#define V_FW_RI_RES_WR_IQRO(x) ((x) << S_FW_RI_RES_WR_IQRO) 523#define FW_RI_RES_WR_IQRO_V(x) ((x) << FW_RI_RES_WR_IQRO_S)
524#define G_FW_RI_RES_WR_IQRO(x) \ 524#define FW_RI_RES_WR_IQRO_G(x) \
525 (((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO) 525 (((x) >> FW_RI_RES_WR_IQRO_S) & FW_RI_RES_WR_IQRO_M)
526#define F_FW_RI_RES_WR_IQRO V_FW_RI_RES_WR_IQRO(1U) 526#define FW_RI_RES_WR_IQRO_F FW_RI_RES_WR_IQRO_V(1U)
527 527
528struct fw_ri_rdma_write_wr { 528struct fw_ri_rdma_write_wr {
529 __u8 opcode; 529 __u8 opcode;
@@ -562,11 +562,11 @@ struct fw_ri_send_wr {
562#endif 562#endif
563}; 563};
564 564
565#define S_FW_RI_SEND_WR_SENDOP 0 565#define FW_RI_SEND_WR_SENDOP_S 0
566#define M_FW_RI_SEND_WR_SENDOP 0xf 566#define FW_RI_SEND_WR_SENDOP_M 0xf
567#define V_FW_RI_SEND_WR_SENDOP(x) ((x) << S_FW_RI_SEND_WR_SENDOP) 567#define FW_RI_SEND_WR_SENDOP_V(x) ((x) << FW_RI_SEND_WR_SENDOP_S)
568#define G_FW_RI_SEND_WR_SENDOP(x) \ 568#define FW_RI_SEND_WR_SENDOP_G(x) \
569 (((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP) 569 (((x) >> FW_RI_SEND_WR_SENDOP_S) & FW_RI_SEND_WR_SENDOP_M)
570 570
571struct fw_ri_rdma_read_wr { 571struct fw_ri_rdma_read_wr {
572 __u8 opcode; 572 __u8 opcode;
@@ -612,25 +612,25 @@ struct fw_ri_bind_mw_wr {
612 __be64 r4; 612 __be64 r4;
613}; 613};
614 614
615#define S_FW_RI_BIND_MW_WR_QPBINDE 6 615#define FW_RI_BIND_MW_WR_QPBINDE_S 6
616#define M_FW_RI_BIND_MW_WR_QPBINDE 0x1 616#define FW_RI_BIND_MW_WR_QPBINDE_M 0x1
617#define V_FW_RI_BIND_MW_WR_QPBINDE(x) ((x) << S_FW_RI_BIND_MW_WR_QPBINDE) 617#define FW_RI_BIND_MW_WR_QPBINDE_V(x) ((x) << FW_RI_BIND_MW_WR_QPBINDE_S)
618#define G_FW_RI_BIND_MW_WR_QPBINDE(x) \ 618#define FW_RI_BIND_MW_WR_QPBINDE_G(x) \
619 (((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE) 619 (((x) >> FW_RI_BIND_MW_WR_QPBINDE_S) & FW_RI_BIND_MW_WR_QPBINDE_M)
620#define F_FW_RI_BIND_MW_WR_QPBINDE V_FW_RI_BIND_MW_WR_QPBINDE(1U) 620#define FW_RI_BIND_MW_WR_QPBINDE_F FW_RI_BIND_MW_WR_QPBINDE_V(1U)
621 621
622#define S_FW_RI_BIND_MW_WR_NS 5 622#define FW_RI_BIND_MW_WR_NS_S 5
623#define M_FW_RI_BIND_MW_WR_NS 0x1 623#define FW_RI_BIND_MW_WR_NS_M 0x1
624#define V_FW_RI_BIND_MW_WR_NS(x) ((x) << S_FW_RI_BIND_MW_WR_NS) 624#define FW_RI_BIND_MW_WR_NS_V(x) ((x) << FW_RI_BIND_MW_WR_NS_S)
625#define G_FW_RI_BIND_MW_WR_NS(x) \ 625#define FW_RI_BIND_MW_WR_NS_G(x) \
626 (((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS) 626 (((x) >> FW_RI_BIND_MW_WR_NS_S) & FW_RI_BIND_MW_WR_NS_M)
627#define F_FW_RI_BIND_MW_WR_NS V_FW_RI_BIND_MW_WR_NS(1U) 627#define FW_RI_BIND_MW_WR_NS_F FW_RI_BIND_MW_WR_NS_V(1U)
628 628
629#define S_FW_RI_BIND_MW_WR_DCACPU 0 629#define FW_RI_BIND_MW_WR_DCACPU_S 0
630#define M_FW_RI_BIND_MW_WR_DCACPU 0x1f 630#define FW_RI_BIND_MW_WR_DCACPU_M 0x1f
631#define V_FW_RI_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_BIND_MW_WR_DCACPU) 631#define FW_RI_BIND_MW_WR_DCACPU_V(x) ((x) << FW_RI_BIND_MW_WR_DCACPU_S)
632#define G_FW_RI_BIND_MW_WR_DCACPU(x) \ 632#define FW_RI_BIND_MW_WR_DCACPU_G(x) \
633 (((x) >> S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU) 633 (((x) >> FW_RI_BIND_MW_WR_DCACPU_S) & FW_RI_BIND_MW_WR_DCACPU_M)
634 634
635struct fw_ri_fr_nsmr_wr { 635struct fw_ri_fr_nsmr_wr {
636 __u8 opcode; 636 __u8 opcode;
@@ -649,25 +649,25 @@ struct fw_ri_fr_nsmr_wr {
649 __be32 va_lo_fbo; 649 __be32 va_lo_fbo;
650}; 650};
651 651
652#define S_FW_RI_FR_NSMR_WR_QPBINDE 6 652#define FW_RI_FR_NSMR_WR_QPBINDE_S 6
653#define M_FW_RI_FR_NSMR_WR_QPBINDE 0x1 653#define FW_RI_FR_NSMR_WR_QPBINDE_M 0x1
654#define V_FW_RI_FR_NSMR_WR_QPBINDE(x) ((x) << S_FW_RI_FR_NSMR_WR_QPBINDE) 654#define FW_RI_FR_NSMR_WR_QPBINDE_V(x) ((x) << FW_RI_FR_NSMR_WR_QPBINDE_S)
655#define G_FW_RI_FR_NSMR_WR_QPBINDE(x) \ 655#define FW_RI_FR_NSMR_WR_QPBINDE_G(x) \
656 (((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE) 656 (((x) >> FW_RI_FR_NSMR_WR_QPBINDE_S) & FW_RI_FR_NSMR_WR_QPBINDE_M)
657#define F_FW_RI_FR_NSMR_WR_QPBINDE V_FW_RI_FR_NSMR_WR_QPBINDE(1U) 657#define FW_RI_FR_NSMR_WR_QPBINDE_F FW_RI_FR_NSMR_WR_QPBINDE_V(1U)
658 658
659#define S_FW_RI_FR_NSMR_WR_NS 5 659#define FW_RI_FR_NSMR_WR_NS_S 5
660#define M_FW_RI_FR_NSMR_WR_NS 0x1 660#define FW_RI_FR_NSMR_WR_NS_M 0x1
661#define V_FW_RI_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_FR_NSMR_WR_NS) 661#define FW_RI_FR_NSMR_WR_NS_V(x) ((x) << FW_RI_FR_NSMR_WR_NS_S)
662#define G_FW_RI_FR_NSMR_WR_NS(x) \ 662#define FW_RI_FR_NSMR_WR_NS_G(x) \
663 (((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS) 663 (((x) >> FW_RI_FR_NSMR_WR_NS_S) & FW_RI_FR_NSMR_WR_NS_M)
664#define F_FW_RI_FR_NSMR_WR_NS V_FW_RI_FR_NSMR_WR_NS(1U) 664#define FW_RI_FR_NSMR_WR_NS_F FW_RI_FR_NSMR_WR_NS_V(1U)
665 665
666#define S_FW_RI_FR_NSMR_WR_DCACPU 0 666#define FW_RI_FR_NSMR_WR_DCACPU_S 0
667#define M_FW_RI_FR_NSMR_WR_DCACPU 0x1f 667#define FW_RI_FR_NSMR_WR_DCACPU_M 0x1f
668#define V_FW_RI_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_FR_NSMR_WR_DCACPU) 668#define FW_RI_FR_NSMR_WR_DCACPU_V(x) ((x) << FW_RI_FR_NSMR_WR_DCACPU_S)
669#define G_FW_RI_FR_NSMR_WR_DCACPU(x) \ 669#define FW_RI_FR_NSMR_WR_DCACPU_G(x) \
670 (((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU) 670 (((x) >> FW_RI_FR_NSMR_WR_DCACPU_S) & FW_RI_FR_NSMR_WR_DCACPU_M)
671 671
672struct fw_ri_inv_lstag_wr { 672struct fw_ri_inv_lstag_wr {
673 __u8 opcode; 673 __u8 opcode;
@@ -740,18 +740,18 @@ struct fw_ri_wr {
740 } u; 740 } u;
741}; 741};
742 742
743#define S_FW_RI_WR_MPAREQBIT 7 743#define FW_RI_WR_MPAREQBIT_S 7
744#define M_FW_RI_WR_MPAREQBIT 0x1 744#define FW_RI_WR_MPAREQBIT_M 0x1
745#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT) 745#define FW_RI_WR_MPAREQBIT_V(x) ((x) << FW_RI_WR_MPAREQBIT_S)
746#define G_FW_RI_WR_MPAREQBIT(x) \ 746#define FW_RI_WR_MPAREQBIT_G(x) \
747 (((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT) 747 (((x) >> FW_RI_WR_MPAREQBIT_S) & FW_RI_WR_MPAREQBIT_M)
748#define F_FW_RI_WR_MPAREQBIT V_FW_RI_WR_MPAREQBIT(1U) 748#define FW_RI_WR_MPAREQBIT_F FW_RI_WR_MPAREQBIT_V(1U)
749 749
750#define S_FW_RI_WR_P2PTYPE 0 750#define FW_RI_WR_P2PTYPE_S 0
751#define M_FW_RI_WR_P2PTYPE 0xf 751#define FW_RI_WR_P2PTYPE_M 0xf
752#define V_FW_RI_WR_P2PTYPE(x) ((x) << S_FW_RI_WR_P2PTYPE) 752#define FW_RI_WR_P2PTYPE_V(x) ((x) << FW_RI_WR_P2PTYPE_S)
753#define G_FW_RI_WR_P2PTYPE(x) \ 753#define FW_RI_WR_P2PTYPE_G(x) \
754 (((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE) 754 (((x) >> FW_RI_WR_P2PTYPE_S) & FW_RI_WR_P2PTYPE_M)
755 755
756struct tcp_options { 756struct tcp_options {
757 __be16 mss; 757 __be16 mss;
@@ -783,58 +783,58 @@ struct cpl_pass_accept_req {
783}; 783};
784 784
785/* cpl_pass_accept_req.hdr_len fields */ 785/* cpl_pass_accept_req.hdr_len fields */
786#define S_SYN_RX_CHAN 0 786#define SYN_RX_CHAN_S 0
787#define M_SYN_RX_CHAN 0xF 787#define SYN_RX_CHAN_M 0xF
788#define V_SYN_RX_CHAN(x) ((x) << S_SYN_RX_CHAN) 788#define SYN_RX_CHAN_V(x) ((x) << SYN_RX_CHAN_S)
789#define G_SYN_RX_CHAN(x) (((x) >> S_SYN_RX_CHAN) & M_SYN_RX_CHAN) 789#define SYN_RX_CHAN_G(x) (((x) >> SYN_RX_CHAN_S) & SYN_RX_CHAN_M)
790 790
791#define S_TCP_HDR_LEN 10 791#define TCP_HDR_LEN_S 10
792#define M_TCP_HDR_LEN 0x3F 792#define TCP_HDR_LEN_M 0x3F
793#define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN) 793#define TCP_HDR_LEN_V(x) ((x) << TCP_HDR_LEN_S)
794#define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN) 794#define TCP_HDR_LEN_G(x) (((x) >> TCP_HDR_LEN_S) & TCP_HDR_LEN_M)
795 795
796#define S_IP_HDR_LEN 16 796#define IP_HDR_LEN_S 16
797#define M_IP_HDR_LEN 0x3FF 797#define IP_HDR_LEN_M 0x3FF
798#define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN) 798#define IP_HDR_LEN_V(x) ((x) << IP_HDR_LEN_S)
799#define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN) 799#define IP_HDR_LEN_G(x) (((x) >> IP_HDR_LEN_S) & IP_HDR_LEN_M)
800 800
801#define S_ETH_HDR_LEN 26 801#define ETH_HDR_LEN_S 26
802#define M_ETH_HDR_LEN 0x1F 802#define ETH_HDR_LEN_M 0x1F
803#define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN) 803#define ETH_HDR_LEN_V(x) ((x) << ETH_HDR_LEN_S)
804#define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN) 804#define ETH_HDR_LEN_G(x) (((x) >> ETH_HDR_LEN_S) & ETH_HDR_LEN_M)
805 805
806/* cpl_pass_accept_req.l2info fields */ 806/* cpl_pass_accept_req.l2info fields */
807#define S_SYN_MAC_IDX 0 807#define SYN_MAC_IDX_S 0
808#define M_SYN_MAC_IDX 0x1FF 808#define SYN_MAC_IDX_M 0x1FF
809#define V_SYN_MAC_IDX(x) ((x) << S_SYN_MAC_IDX) 809#define SYN_MAC_IDX_V(x) ((x) << SYN_MAC_IDX_S)
810#define G_SYN_MAC_IDX(x) (((x) >> S_SYN_MAC_IDX) & M_SYN_MAC_IDX) 810#define SYN_MAC_IDX_G(x) (((x) >> SYN_MAC_IDX_S) & SYN_MAC_IDX_M)
811 811
812#define S_SYN_XACT_MATCH 9 812#define SYN_XACT_MATCH_S 9
813#define V_SYN_XACT_MATCH(x) ((x) << S_SYN_XACT_MATCH) 813#define SYN_XACT_MATCH_V(x) ((x) << SYN_XACT_MATCH_S)
814#define F_SYN_XACT_MATCH V_SYN_XACT_MATCH(1U) 814#define SYN_XACT_MATCH_F SYN_XACT_MATCH_V(1U)
815 815
816#define S_SYN_INTF 12 816#define SYN_INTF_S 12
817#define M_SYN_INTF 0xF 817#define SYN_INTF_M 0xF
818#define V_SYN_INTF(x) ((x) << S_SYN_INTF) 818#define SYN_INTF_V(x) ((x) << SYN_INTF_S)
819#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF) 819#define SYN_INTF_G(x) (((x) >> SYN_INTF_S) & SYN_INTF_M)
820 820
821struct ulptx_idata { 821struct ulptx_idata {
822 __be32 cmd_more; 822 __be32 cmd_more;
823 __be32 len; 823 __be32 len;
824}; 824};
825 825
826#define S_ULPTX_NSGE 0 826#define ULPTX_NSGE_S 0
827#define M_ULPTX_NSGE 0xFFFF 827#define ULPTX_NSGE_M 0xFFFF
828#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE) 828#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
829 829
830#define S_RX_DACK_MODE 29 830#define RX_DACK_MODE_S 29
831#define M_RX_DACK_MODE 0x3 831#define RX_DACK_MODE_M 0x3
832#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE) 832#define RX_DACK_MODE_V(x) ((x) << RX_DACK_MODE_S)
833#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE) 833#define RX_DACK_MODE_G(x) (((x) >> RX_DACK_MODE_S) & RX_DACK_MODE_M)
834 834
835#define S_RX_DACK_CHANGE 31 835#define RX_DACK_CHANGE_S 31
836#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE) 836#define RX_DACK_CHANGE_V(x) ((x) << RX_DACK_CHANGE_S)
837#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U) 837#define RX_DACK_CHANGE_F RX_DACK_CHANGE_V(1U)
838 838
839enum { /* TCP congestion control algorithms */ 839enum { /* TCP congestion control algorithms */
840 CONG_ALG_RENO, 840 CONG_ALG_RENO,
@@ -843,10 +843,10 @@ enum { /* TCP congestion control algorithms */
843 CONG_ALG_HIGHSPEED 843 CONG_ALG_HIGHSPEED
844}; 844};
845 845
846#define S_CONG_CNTRL 14 846#define CONG_CNTRL_S 14
847#define M_CONG_CNTRL 0x3 847#define CONG_CNTRL_M 0x3
848#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL) 848#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
849#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL) 849#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
850 850
851#define CONG_CNTRL_VALID (1 << 18) 851#define CONG_CNTRL_VALID (1 << 18)
852 852
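CONG_CNTRL pairs a 2-bit algorithm selector with a separate valid bit, so a caller must set both for the hardware to honor the choice. A hedged sketch (opt2 is a hypothetical host-order option word, not taken from the patch):

	u32 opt2 = 0;

	opt2 |= CONG_CNTRL_V(CONG_ALG_HIGHSPEED);	/* selector in bits 15:14 */
	opt2 |= CONG_CNTRL_VALID;			/* mark the selector as set */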
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 2d8c3397774f..f50a546224ad 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -36,6 +36,7 @@
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/inet.h> 37#include <linux/inet.h>
38#include <linux/string.h> 38#include <linux/string.h>
39#include <linux/mlx4/driver.h>
39 40
40#include "mlx4_ib.h" 41#include "mlx4_ib.h"
41 42
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 0eb141c41416..a31e031afd87 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -154,7 +154,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
154 continue; 154 continue;
155 155
156 slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ; 156 slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
157 if (slave_id >= dev->dev->num_vfs + 1) 157 if (slave_id >= dev->dev->persist->num_vfs + 1)
158 return; 158 return;
159 tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE]; 159 tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
160 form_cache_ag = get_cached_alias_guid(dev, port_num, 160 form_cache_ag = get_cached_alias_guid(dev, port_num,
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index a3b70f6c4035..543ecdd8667b 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -188,6 +188,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
188 spin_lock_init(&cq->lock); 188 spin_lock_init(&cq->lock);
189 cq->resize_buf = NULL; 189 cq->resize_buf = NULL;
190 cq->resize_umem = NULL; 190 cq->resize_umem = NULL;
191 INIT_LIST_HEAD(&cq->send_qp_list);
192 INIT_LIST_HEAD(&cq->recv_qp_list);
191 193
192 if (context) { 194 if (context) {
193 struct mlx4_ib_create_cq ucmd; 195 struct mlx4_ib_create_cq ucmd;
@@ -594,6 +596,55 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
594 return 0; 596 return 0;
595} 597}
596 598
599static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
600 struct ib_wc *wc, int *npolled, int is_send)
601{
602 struct mlx4_ib_wq *wq;
603 unsigned cur;
604 int i;
605
606 wq = is_send ? &qp->sq : &qp->rq;
607 cur = wq->head - wq->tail;
608
609 if (cur == 0)
610 return;
611
612 for (i = 0; i < cur && *npolled < num_entries; i++) {
613 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
614 wc->status = IB_WC_WR_FLUSH_ERR;
615 wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
616 wq->tail++;
617 (*npolled)++;
618 wc->qp = &qp->ibqp;
619 wc++;
620 }
621}
622
623static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
624 struct ib_wc *wc, int *npolled)
625{
626 struct mlx4_ib_qp *qp;
627
628 *npolled = 0;
 629 /* Find uncompleted WQEs belonging to this CQ and return
 630 * simulated FLUSH_ERR completions
631 */
632 list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
633 mlx4_ib_qp_sw_comp(qp, num_entries, wc, npolled, 1);
634 if (*npolled >= num_entries)
635 goto out;
636 }
637
638 list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
639 mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
640 if (*npolled >= num_entries)
641 goto out;
642 }
643
644out:
645 return;
646}
647
597static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, 648static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
598 struct mlx4_ib_qp **cur_qp, 649 struct mlx4_ib_qp **cur_qp,
599 struct ib_wc *wc) 650 struct ib_wc *wc)
@@ -836,8 +887,13 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
836 unsigned long flags; 887 unsigned long flags;
837 int npolled; 888 int npolled;
838 int err = 0; 889 int err = 0;
890 struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
839 891
840 spin_lock_irqsave(&cq->lock, flags); 892 spin_lock_irqsave(&cq->lock, flags);
893 if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
894 mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
895 goto out;
896 }
841 897
842 for (npolled = 0; npolled < num_entries; ++npolled) { 898 for (npolled = 0; npolled < num_entries; ++npolled) {
843 err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled); 899 err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
@@ -847,6 +903,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
847 903
848 mlx4_cq_set_ci(&cq->mcq); 904 mlx4_cq_set_ci(&cq->mcq);
849 905
906out:
850 spin_unlock_irqrestore(&cq->lock, flags); 907 spin_unlock_irqrestore(&cq->lock, flags);
851 908
852 if (err == 0 || err == -EAGAIN) 909 if (err == 0 || err == -EAGAIN)
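Taken together, these cq.c hunks make mlx4_ib_poll_cq stop reading hardware CQEs once the device enters MLX4_DEVICE_STATE_INTERNAL_ERROR and instead report every outstanding WQE on the CQ's send/recv QP lists as a simulated IB_WC_WR_FLUSH_ERR completion. A caller-side sketch of what a verbs consumer then observes (cq is assumed to exist; reclaim_buffer is a hypothetical helper):

	struct ib_wc wc[16];
	int i, n;

	while ((n = ib_poll_cq(cq, 16, wc)) > 0) {
		for (i = 0; i < n; i++) {
			/* wr_id survives in the fabricated completion, so
			 * posted buffers can still be reclaimed cleanly
			 */
			if (wc[i].status == IB_WC_WR_FLUSH_ERR)
				reclaim_buffer(wc[i].wr_id);	/* hypothetical */
		}
	}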
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 82a7dd87089b..c7619716c31d 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1951,7 +1951,8 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
1951 ctx->ib_dev = &dev->ib_dev; 1951 ctx->ib_dev = &dev->ib_dev;
1952 1952
1953 for (i = 0; 1953 for (i = 0;
1954 i < min(dev->dev->caps.sqp_demux, (u16)(dev->dev->num_vfs + 1)); 1954 i < min(dev->dev->caps.sqp_demux,
1955 (u16)(dev->dev->persist->num_vfs + 1));
1955 i++) { 1956 i++) {
1956 struct mlx4_active_ports actv_ports = 1957 struct mlx4_active_ports actv_ports =
1957 mlx4_get_active_ports(dev->dev, i); 1958 mlx4_get_active_ports(dev->dev, i);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 9117b7a2d5f8..eb8e215f1613 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -198,7 +198,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
198 198
199 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 199 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
200 0xffffff; 200 0xffffff;
201 props->vendor_part_id = dev->dev->pdev->device; 201 props->vendor_part_id = dev->dev->persist->pdev->device;
202 props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32)); 202 props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
203 memcpy(&props->sys_image_guid, out_mad->data + 4, 8); 203 memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
204 204
@@ -351,6 +351,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
351 enum ib_mtu tmp; 351 enum ib_mtu tmp;
352 struct mlx4_cmd_mailbox *mailbox; 352 struct mlx4_cmd_mailbox *mailbox;
353 int err = 0; 353 int err = 0;
354 int is_bonded = mlx4_is_bonded(mdev->dev);
354 355
355 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); 356 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
356 if (IS_ERR(mailbox)) 357 if (IS_ERR(mailbox))
@@ -374,8 +375,12 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
374 props->state = IB_PORT_DOWN; 375 props->state = IB_PORT_DOWN;
375 props->phys_state = state_to_phys_state(props->state); 376 props->phys_state = state_to_phys_state(props->state);
376 props->active_mtu = IB_MTU_256; 377 props->active_mtu = IB_MTU_256;
378 if (is_bonded)
379 rtnl_lock(); /* required to get upper dev */
377 spin_lock_bh(&iboe->lock); 380 spin_lock_bh(&iboe->lock);
378 ndev = iboe->netdevs[port - 1]; 381 ndev = iboe->netdevs[port - 1];
382 if (ndev && is_bonded)
383 ndev = netdev_master_upper_dev_get(ndev);
379 if (!ndev) 384 if (!ndev)
380 goto out_unlock; 385 goto out_unlock;
381 386
@@ -387,6 +392,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
387 props->phys_state = state_to_phys_state(props->state); 392 props->phys_state = state_to_phys_state(props->state);
388out_unlock: 393out_unlock:
389 spin_unlock_bh(&iboe->lock); 394 spin_unlock_bh(&iboe->lock);
395 if (is_bonded)
396 rtnl_unlock();
390out: 397out:
391 mlx4_free_cmd_mailbox(mdev->dev, mailbox); 398 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
392 return err; 399 return err;
@@ -844,7 +851,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
844 851
845struct mlx4_ib_steering { 852struct mlx4_ib_steering {
846 struct list_head list; 853 struct list_head list;
847 u64 reg_id; 854 struct mlx4_flow_reg_id reg_id;
848 union ib_gid gid; 855 union ib_gid gid;
849}; 856};
850 857
@@ -1135,9 +1142,11 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1135 struct ib_flow_attr *flow_attr, 1142 struct ib_flow_attr *flow_attr,
1136 int domain) 1143 int domain)
1137{ 1144{
1138 int err = 0, i = 0; 1145 int err = 0, i = 0, j = 0;
1139 struct mlx4_ib_flow *mflow; 1146 struct mlx4_ib_flow *mflow;
1140 enum mlx4_net_trans_promisc_mode type[2]; 1147 enum mlx4_net_trans_promisc_mode type[2];
1148 struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
1149 int is_bonded = mlx4_is_bonded(dev);
1141 1150
1142 memset(type, 0, sizeof(type)); 1151 memset(type, 0, sizeof(type));
1143 1152
@@ -1172,26 +1181,58 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1172 1181
1173 while (i < ARRAY_SIZE(type) && type[i]) { 1182 while (i < ARRAY_SIZE(type) && type[i]) {
1174 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i], 1183 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
1175 &mflow->reg_id[i]); 1184 &mflow->reg_id[i].id);
1176 if (err) 1185 if (err)
1177 goto err_create_flow; 1186 goto err_create_flow;
1178 i++; 1187 i++;
1188 if (is_bonded) {
1189 /* Application always sees one port so the mirror rule
1190 * must be on port #2
1191 */
1192 flow_attr->port = 2;
1193 err = __mlx4_ib_create_flow(qp, flow_attr,
1194 domain, type[j],
1195 &mflow->reg_id[j].mirror);
1196 flow_attr->port = 1;
1197 if (err)
1198 goto err_create_flow;
1199 j++;
1200 }
1201
1179 } 1202 }
1180 1203
1181 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) { 1204 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1182 err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]); 1205 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1206 &mflow->reg_id[i].id);
1183 if (err) 1207 if (err)
1184 goto err_create_flow; 1208 goto err_create_flow;
1185 i++; 1209 i++;
1210 if (is_bonded) {
1211 flow_attr->port = 2;
1212 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1213 &mflow->reg_id[j].mirror);
1214 flow_attr->port = 1;
1215 if (err)
1216 goto err_create_flow;
1217 j++;
1218 }
1219 /* function to create mirror rule */
1186 } 1220 }
1187 1221
1188 return &mflow->ibflow; 1222 return &mflow->ibflow;
1189 1223
1190err_create_flow: 1224err_create_flow:
1191 while (i) { 1225 while (i) {
1192 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]); 1226 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1227 mflow->reg_id[i].id);
1193 i--; 1228 i--;
1194 } 1229 }
1230
1231 while (j) {
1232 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1233 mflow->reg_id[j].mirror);
1234 j--;
1235 }
1195err_free: 1236err_free:
1196 kfree(mflow); 1237 kfree(mflow);
1197 return ERR_PTR(err); 1238 return ERR_PTR(err);
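Under bonding the application sees a single port, so each steering rule it creates is duplicated on physical port #2 and both firmware handles are carried in one mlx4_flow_reg_id pair (the struct is added to mlx4_ib.h later in this patch). A condensed sketch of the pairing this hunk implements, assuming qp, flow_attr, domain, type and is_bonded are in scope:

	struct mlx4_flow_reg_id reg = { .id = 0, .mirror = 0 };
	int err;

	err = __mlx4_ib_create_flow(qp, flow_attr, domain, type, &reg.id);
	if (!err && is_bonded) {
		flow_attr->port = 2;	/* mirror rule goes to the hidden port */
		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type,
					    &reg.mirror);
		flow_attr->port = 1;	/* restore the port the app sees */
	}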
@@ -1204,10 +1245,16 @@ static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1204 struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device); 1245 struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1205 struct mlx4_ib_flow *mflow = to_mflow(flow_id); 1246 struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1206 1247
1207 while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) { 1248 while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
1208 err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]); 1249 err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
1209 if (err) 1250 if (err)
1210 ret = err; 1251 ret = err;
1252 if (mflow->reg_id[i].mirror) {
1253 err = __mlx4_ib_destroy_flow(mdev->dev,
1254 mflow->reg_id[i].mirror);
1255 if (err)
1256 ret = err;
1257 }
1211 i++; 1258 i++;
1212 } 1259 }
1213 1260
@@ -1219,11 +1266,12 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1219{ 1266{
1220 int err; 1267 int err;
1221 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); 1268 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1269 struct mlx4_dev *dev = mdev->dev;
1222 struct mlx4_ib_qp *mqp = to_mqp(ibqp); 1270 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1223 u64 reg_id;
1224 struct mlx4_ib_steering *ib_steering = NULL; 1271 struct mlx4_ib_steering *ib_steering = NULL;
1225 enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? 1272 enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
1226 MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6; 1273 MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
1274 struct mlx4_flow_reg_id reg_id;
1227 1275
1228 if (mdev->dev->caps.steering_mode == 1276 if (mdev->dev->caps.steering_mode ==
1229 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1277 MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1235,10 +1283,21 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1235 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port, 1283 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
1236 !!(mqp->flags & 1284 !!(mqp->flags &
1237 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), 1285 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1238 prot, &reg_id); 1286 prot, &reg_id.id);
1239 if (err) 1287 if (err)
1240 goto err_malloc; 1288 goto err_malloc;
1241 1289
1290 reg_id.mirror = 0;
1291 if (mlx4_is_bonded(dev)) {
1292 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
1293 (mqp->port == 1) ? 2 : 1,
1294 !!(mqp->flags &
1295 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1296 prot, &reg_id.mirror);
1297 if (err)
1298 goto err_add;
1299 }
1300
1242 err = add_gid_entry(ibqp, gid); 1301 err = add_gid_entry(ibqp, gid);
1243 if (err) 1302 if (err)
1244 goto err_add; 1303 goto err_add;
@@ -1254,7 +1313,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1254 1313
1255err_add: 1314err_add:
1256 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, 1315 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1257 prot, reg_id); 1316 prot, reg_id.id);
1317 if (reg_id.mirror)
1318 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1319 prot, reg_id.mirror);
1258err_malloc: 1320err_malloc:
1259 kfree(ib_steering); 1321 kfree(ib_steering);
1260 1322
@@ -1281,10 +1343,12 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1281{ 1343{
1282 int err; 1344 int err;
1283 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); 1345 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1346 struct mlx4_dev *dev = mdev->dev;
1284 struct mlx4_ib_qp *mqp = to_mqp(ibqp); 1347 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1285 struct net_device *ndev; 1348 struct net_device *ndev;
1286 struct mlx4_ib_gid_entry *ge; 1349 struct mlx4_ib_gid_entry *ge;
1287 u64 reg_id = 0; 1350 struct mlx4_flow_reg_id reg_id = {0, 0};
1351
1288 enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? 1352 enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
1289 MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6; 1353 MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
1290 1354
@@ -1309,10 +1373,17 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1309 } 1373 }
1310 1374
1311 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, 1375 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1312 prot, reg_id); 1376 prot, reg_id.id);
1313 if (err) 1377 if (err)
1314 return err; 1378 return err;
1315 1379
1380 if (mlx4_is_bonded(dev)) {
1381 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1382 prot, reg_id.mirror);
1383 if (err)
1384 return err;
1385 }
1386
1316 mutex_lock(&mqp->mutex); 1387 mutex_lock(&mqp->mutex);
1317 ge = find_gid_entry(mqp, gid->raw); 1388 ge = find_gid_entry(mqp, gid->raw);
1318 if (ge) { 1389 if (ge) {
@@ -1376,7 +1447,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
1376{ 1447{
1377 struct mlx4_ib_dev *dev = 1448 struct mlx4_ib_dev *dev =
1378 container_of(device, struct mlx4_ib_dev, ib_dev.dev); 1449 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1379 return sprintf(buf, "MT%d\n", dev->dev->pdev->device); 1450 return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
1380} 1451}
1381 1452
1382static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, 1453static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
@@ -1440,6 +1511,7 @@ static void update_gids_task(struct work_struct *work)
1440 union ib_gid *gids; 1511 union ib_gid *gids;
1441 int err; 1512 int err;
1442 struct mlx4_dev *dev = gw->dev->dev; 1513 struct mlx4_dev *dev = gw->dev->dev;
1514 int is_bonded = mlx4_is_bonded(dev);
1443 1515
1444 if (!gw->dev->ib_active) 1516 if (!gw->dev->ib_active)
1445 return; 1517 return;
@@ -1459,7 +1531,10 @@ static void update_gids_task(struct work_struct *work)
1459 if (err) 1531 if (err)
1460 pr_warn("set port command failed\n"); 1532 pr_warn("set port command failed\n");
1461 else 1533 else
1462 mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE); 1534 if ((gw->port == 1) || !is_bonded)
1535 mlx4_ib_dispatch_event(gw->dev,
1536 is_bonded ? 1 : gw->port,
1537 IB_EVENT_GID_CHANGE);
1463 1538
1464 mlx4_free_cmd_mailbox(dev, mailbox); 1539 mlx4_free_cmd_mailbox(dev, mailbox);
1465 kfree(gw); 1540 kfree(gw);
@@ -1875,7 +1950,8 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
1875 * don't want the bond IP based gids in the table since 1950 * don't want the bond IP based gids in the table since
1876 * flows that select port by gid may get the down port. 1951 * flows that select port by gid may get the down port.
1877 */ 1952 */
1878 if (port_state == IB_PORT_DOWN) { 1953 if (port_state == IB_PORT_DOWN &&
1954 !mlx4_is_bonded(ibdev->dev)) {
1879 reset_gid_table(ibdev, port); 1955 reset_gid_table(ibdev, port);
1880 mlx4_ib_set_default_gid(ibdev, 1956 mlx4_ib_set_default_gid(ibdev,
1881 curr_netdev, 1957 curr_netdev,
@@ -1938,7 +2014,8 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
1938 int i; 2014 int i;
1939 2015
1940 if (mlx4_is_master(ibdev->dev)) { 2016 if (mlx4_is_master(ibdev->dev)) {
1941 for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) { 2017 for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2018 ++slave) {
1942 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { 2019 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
1943 for (i = 0; 2020 for (i = 0;
1944 i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; 2021 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
@@ -1995,7 +2072,7 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1995 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) { 2072 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
1996 for (j = 0; j < eq_per_port; j++) { 2073 for (j = 0; j < eq_per_port; j++) {
1997 snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s", 2074 snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
1998 i, j, dev->pdev->bus->name); 2075 i, j, dev->persist->pdev->bus->name);
1999 /* Set IRQ for specific name (per ring) */ 2076 /* Set IRQ for specific name (per ring) */
2000 if (mlx4_assign_eq(dev, name, NULL, 2077 if (mlx4_assign_eq(dev, name, NULL,
2001 &ibdev->eq_table[eq])) { 2078 &ibdev->eq_table[eq])) {
@@ -2046,6 +2123,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
2046 int err; 2123 int err;
2047 struct mlx4_ib_iboe *iboe; 2124 struct mlx4_ib_iboe *iboe;
2048 int ib_num_ports = 0; 2125 int ib_num_ports = 0;
2126 int num_req_counters;
2049 2127
2050 pr_info_once("%s", mlx4_ib_version); 2128 pr_info_once("%s", mlx4_ib_version);
2051 2129
@@ -2059,7 +2137,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
2059 2137
2060 ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev); 2138 ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
2061 if (!ibdev) { 2139 if (!ibdev) {
2062 dev_err(&dev->pdev->dev, "Device struct alloc failed\n"); 2140 dev_err(&dev->persist->pdev->dev,
2141 "Device struct alloc failed\n");
2063 return NULL; 2142 return NULL;
2064 } 2143 }
2065 2144
@@ -2078,15 +2157,17 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
2078 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); 2157 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
2079 2158
2080 ibdev->dev = dev; 2159 ibdev->dev = dev;
2160 ibdev->bond_next_port = 0;
2081 2161
2082 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX); 2162 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
2083 ibdev->ib_dev.owner = THIS_MODULE; 2163 ibdev->ib_dev.owner = THIS_MODULE;
2084 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA; 2164 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
2085 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey; 2165 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
2086 ibdev->num_ports = num_ports; 2166 ibdev->num_ports = num_ports;
2087 ibdev->ib_dev.phys_port_cnt = ibdev->num_ports; 2167 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
2168 1 : ibdev->num_ports;
2088 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; 2169 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
2089 ibdev->ib_dev.dma_device = &dev->pdev->dev; 2170 ibdev->ib_dev.dma_device = &dev->persist->pdev->dev;
2090 2171
2091 if (dev->caps.userspace_caps) 2172 if (dev->caps.userspace_caps)
2092 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION; 2173 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
@@ -2205,7 +2286,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
2205 if (init_node_data(ibdev)) 2286 if (init_node_data(ibdev))
2206 goto err_map; 2287 goto err_map;
2207 2288
2208 for (i = 0; i < ibdev->num_ports; ++i) { 2289 num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2290 for (i = 0; i < num_req_counters; ++i) {
2209 mutex_init(&ibdev->qp1_proxy_lock[i]); 2291 mutex_init(&ibdev->qp1_proxy_lock[i]);
2210 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == 2292 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2211 IB_LINK_LAYER_ETHERNET) { 2293 IB_LINK_LAYER_ETHERNET) {
@@ -2216,12 +2298,18 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
2216 ibdev->counters[i] = -1; 2298 ibdev->counters[i] = -1;
2217 } 2299 }
2218 } 2300 }
2301 if (mlx4_is_bonded(dev))
2302 for (i = 1; i < ibdev->num_ports ; ++i)
2303 ibdev->counters[i] = ibdev->counters[0];
2304
2219 2305
2220 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) 2306 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2221 ib_num_ports++; 2307 ib_num_ports++;
2222 2308
2223 spin_lock_init(&ibdev->sm_lock); 2309 spin_lock_init(&ibdev->sm_lock);
2224 mutex_init(&ibdev->cap_mask_mutex); 2310 mutex_init(&ibdev->cap_mask_mutex);
2311 INIT_LIST_HEAD(&ibdev->qp_list);
2312 spin_lock_init(&ibdev->reset_flow_resource_lock);
2225 2313
2226 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED && 2314 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2227 ib_num_ports) { 2315 ib_num_ports) {
@@ -2237,7 +2325,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
2237 sizeof(long), 2325 sizeof(long),
2238 GFP_KERNEL); 2326 GFP_KERNEL);
2239 if (!ibdev->ib_uc_qpns_bitmap) { 2327 if (!ibdev->ib_uc_qpns_bitmap) {
2240 dev_err(&dev->pdev->dev, "bit map alloc failed\n"); 2328 dev_err(&dev->persist->pdev->dev,
2329 "bit map alloc failed\n");
2241 goto err_steer_qp_release; 2330 goto err_steer_qp_release;
2242 } 2331 }
2243 2332
@@ -2535,6 +2624,99 @@ out:
2535 return; 2624 return;
2536} 2625}
2537 2626
2627static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
2628{
2629 struct mlx4_ib_qp *mqp;
2630 unsigned long flags_qp;
2631 unsigned long flags_cq;
2632 struct mlx4_ib_cq *send_mcq, *recv_mcq;
2633 struct list_head cq_notify_list;
2634 struct mlx4_cq *mcq;
2635 unsigned long flags;
2636
2637 pr_warn("mlx4_ib_handle_catas_error was started\n");
2638 INIT_LIST_HEAD(&cq_notify_list);
2639
 2640 /* Go over the qp list residing on that ibdev; sync with create/destroy qp. */
2641 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
2642
2643 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
2644 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
2645 if (mqp->sq.tail != mqp->sq.head) {
2646 send_mcq = to_mcq(mqp->ibqp.send_cq);
2647 spin_lock_irqsave(&send_mcq->lock, flags_cq);
2648 if (send_mcq->mcq.comp &&
2649 mqp->ibqp.send_cq->comp_handler) {
2650 if (!send_mcq->mcq.reset_notify_added) {
2651 send_mcq->mcq.reset_notify_added = 1;
2652 list_add_tail(&send_mcq->mcq.reset_notify,
2653 &cq_notify_list);
2654 }
2655 }
2656 spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
2657 }
2658 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
2659 /* Now, handle the QP's receive queue */
2660 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
2661 /* no handling is needed for SRQ */
2662 if (!mqp->ibqp.srq) {
2663 if (mqp->rq.tail != mqp->rq.head) {
2664 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
2665 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
2666 if (recv_mcq->mcq.comp &&
2667 mqp->ibqp.recv_cq->comp_handler) {
2668 if (!recv_mcq->mcq.reset_notify_added) {
2669 recv_mcq->mcq.reset_notify_added = 1;
2670 list_add_tail(&recv_mcq->mcq.reset_notify,
2671 &cq_notify_list);
2672 }
2673 }
2674 spin_unlock_irqrestore(&recv_mcq->lock,
2675 flags_cq);
2676 }
2677 }
2678 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
2679 }
2680
2681 list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
2682 mcq->comp(mcq);
2683 }
2684 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
2685 pr_warn("mlx4_ib_handle_catas_error ended\n");
2686}
2687
2688static void handle_bonded_port_state_event(struct work_struct *work)
2689{
2690 struct ib_event_work *ew =
2691 container_of(work, struct ib_event_work, work);
2692 struct mlx4_ib_dev *ibdev = ew->ib_dev;
2693 enum ib_port_state bonded_port_state = IB_PORT_NOP;
2694 int i;
2695 struct ib_event ibev;
2696
2697 kfree(ew);
2698 spin_lock_bh(&ibdev->iboe.lock);
2699 for (i = 0; i < MLX4_MAX_PORTS; ++i) {
2700 struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
2701
2702 enum ib_port_state curr_port_state =
2703 (netif_running(curr_netdev) &&
2704 netif_carrier_ok(curr_netdev)) ?
2705 IB_PORT_ACTIVE : IB_PORT_DOWN;
2706
2707 bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
2708 curr_port_state : IB_PORT_ACTIVE;
2709 }
2710 spin_unlock_bh(&ibdev->iboe.lock);
2711
2712 ibev.device = &ibdev->ib_dev;
2713 ibev.element.port_num = 1;
2714 ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
2715 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
2716
2717 ib_dispatch_event(&ibev);
2718}
2719
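The worker above collapses the two physical ports into the single port a bonded device exposes: the bond is IB_PORT_ACTIVE if at least one slave netdev is up with carrier, IB_PORT_DOWN otherwise, and the dispatched event always names port 1. The aggregation reduces to this sketch (state1 and state2 are hypothetical per-slave port states):

	enum ib_port_state bonded =
		(state1 == IB_PORT_ACTIVE || state2 == IB_PORT_ACTIVE) ?
		IB_PORT_ACTIVE : IB_PORT_DOWN;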
2538static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, 2720static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
2539 enum mlx4_dev_event event, unsigned long param) 2721 enum mlx4_dev_event event, unsigned long param)
2540{ 2722{
@@ -2544,6 +2726,18 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
2544 struct ib_event_work *ew; 2726 struct ib_event_work *ew;
2545 int p = 0; 2727 int p = 0;
2546 2728
2729 if (mlx4_is_bonded(dev) &&
2730 ((event == MLX4_DEV_EVENT_PORT_UP) ||
2731 (event == MLX4_DEV_EVENT_PORT_DOWN))) {
2732 ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
2733 if (!ew)
2734 return;
2735 INIT_WORK(&ew->work, handle_bonded_port_state_event);
2736 ew->ib_dev = ibdev;
2737 queue_work(wq, &ew->work);
2738 return;
2739 }
2740
2547 if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE) 2741 if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
2548 eqe = (struct mlx4_eqe *)param; 2742 eqe = (struct mlx4_eqe *)param;
2549 else 2743 else
@@ -2570,6 +2764,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
2570 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: 2764 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
2571 ibdev->ib_active = false; 2765 ibdev->ib_active = false;
2572 ibev.event = IB_EVENT_DEVICE_FATAL; 2766 ibev.event = IB_EVENT_DEVICE_FATAL;
2767 mlx4_ib_handle_catas_error(ibdev);
2573 break; 2768 break;
2574 2769
2575 case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: 2770 case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
@@ -2604,7 +2799,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
2604 } 2799 }
2605 2800
2606 ibev.device = ibdev_ptr; 2801 ibev.device = ibdev_ptr;
2607 ibev.element.port_num = (u8) p; 2802 ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
2608 2803
2609 ib_dispatch_event(&ibev); 2804 ib_dispatch_event(&ibev);
2610} 2805}
@@ -2613,7 +2808,8 @@ static struct mlx4_interface mlx4_ib_interface = {
2613 .add = mlx4_ib_add, 2808 .add = mlx4_ib_add,
2614 .remove = mlx4_ib_remove, 2809 .remove = mlx4_ib_remove,
2615 .event = mlx4_ib_event, 2810 .event = mlx4_ib_event,
2616 .protocol = MLX4_PROT_IB_IPV6 2811 .protocol = MLX4_PROT_IB_IPV6,
2812 .flags = MLX4_INTFF_BONDING
2617}; 2813};
2618 2814
2619static int __init mlx4_ib_init(void) 2815static int __init mlx4_ib_init(void)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6eb743f65f6f..f829fd935b79 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -110,6 +110,9 @@ struct mlx4_ib_cq {
110 struct mutex resize_mutex; 110 struct mutex resize_mutex;
111 struct ib_umem *umem; 111 struct ib_umem *umem;
112 struct ib_umem *resize_umem; 112 struct ib_umem *resize_umem;
113 /* List of QPs that this CQ serves. */
114 struct list_head send_qp_list;
115 struct list_head recv_qp_list;
113}; 116};
114 117
115struct mlx4_ib_mr { 118struct mlx4_ib_mr {
@@ -134,10 +137,17 @@ struct mlx4_ib_fmr {
134 struct mlx4_fmr mfmr; 137 struct mlx4_fmr mfmr;
135}; 138};
136 139
140#define MAX_REGS_PER_FLOW 2
141
142struct mlx4_flow_reg_id {
143 u64 id;
144 u64 mirror;
145};
146
137struct mlx4_ib_flow { 147struct mlx4_ib_flow {
138 struct ib_flow ibflow; 148 struct ib_flow ibflow;
139 /* translating DMFS verbs sniffer rule to FW API requires two reg IDs */ 149 /* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
140 u64 reg_id[2]; 150 struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW];
141}; 151};
142 152
143struct mlx4_ib_wq { 153struct mlx4_ib_wq {
@@ -293,6 +303,9 @@ struct mlx4_ib_qp {
293 struct mlx4_roce_smac_vlan_info pri; 303 struct mlx4_roce_smac_vlan_info pri;
294 struct mlx4_roce_smac_vlan_info alt; 304 struct mlx4_roce_smac_vlan_info alt;
295 u64 reg_id; 305 u64 reg_id;
306 struct list_head qps_list;
307 struct list_head cq_recv_list;
308 struct list_head cq_send_list;
296}; 309};
297 310
298struct mlx4_ib_srq { 311struct mlx4_ib_srq {
@@ -527,6 +540,10 @@ struct mlx4_ib_dev {
527 struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS]; 540 struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS];
528 /* lock when destroying qp1_proxy and getting netdev events */ 541 /* lock when destroying qp1_proxy and getting netdev events */
529 struct mutex qp1_proxy_lock[MLX4_MAX_PORTS]; 542 struct mutex qp1_proxy_lock[MLX4_MAX_PORTS];
543 u8 bond_next_port;
544 /* protect resources needed as part of reset flow */
545 spinlock_t reset_flow_resource_lock;
546 struct list_head qp_list;
530}; 547};
531 548
532struct ib_event_work { 549struct ib_event_work {
@@ -622,6 +639,13 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
622 return container_of(ibah, struct mlx4_ib_ah, ibah); 639 return container_of(ibah, struct mlx4_ib_ah, ibah);
623} 640}
624 641
642static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
643{
644 dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports;
645
646 return dev->bond_next_port + 1;
647}
648
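The arithmetic in mlx4_ib_bond_next_port() is easy to misread: the stored counter is 0-based, while the returned port number is 1-based. A standalone demo of the sequence it produces (plain C, compile with cc demo.c):

#include <stdio.h>

/* Same round-robin as above: advance modulo num_ports, report 1-based. */
static int bond_next_port(int *state, int num_ports)
{
	*state = (*state + 1) % num_ports;
	return *state + 1;
}

int main(void)
{
	int state = 0, i;

	/* With two bonded ports, starting from 0, this prints 2 1 2 1 2 1. */
	for (i = 0; i < 6; i++)
		printf("%d ", bond_next_port(&state, 2));
	printf("\n");
	return 0;
}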
625int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev); 649int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
626void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev); 650void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);
627 651
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index c36ccbd9a644..e0d271782d0a 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -401,7 +401,8 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device
401 if (!mfrpl->ibfrpl.page_list) 401 if (!mfrpl->ibfrpl.page_list)
402 goto err_free; 402 goto err_free;
403 403
404 mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev, 404 mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->persist->
405 pdev->dev,
405 size, &mfrpl->map, 406 size, &mfrpl->map,
406 GFP_KERNEL); 407 GFP_KERNEL);
407 if (!mfrpl->mapped_page_list) 408 if (!mfrpl->mapped_page_list)
@@ -423,7 +424,8 @@ void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
423 struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list); 424 struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
424 int size = page_list->max_page_list_len * sizeof (u64); 425 int size = page_list->max_page_list_len * sizeof (u64);
425 426
426 dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list, 427 dma_free_coherent(&dev->dev->persist->pdev->dev, size,
428 mfrpl->mapped_page_list,
427 mfrpl->map); 429 mfrpl->map);
428 kfree(mfrpl->ibfrpl.page_list); 430 kfree(mfrpl->ibfrpl.page_list);
429 kfree(mfrpl); 431 kfree(mfrpl);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index cf000b7ad64f..dfc6ca128a7e 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -40,11 +40,17 @@
40#include <rdma/ib_addr.h> 40#include <rdma/ib_addr.h>
41#include <rdma/ib_mad.h> 41#include <rdma/ib_mad.h>
42 42
43#include <linux/mlx4/driver.h>
43#include <linux/mlx4/qp.h> 44#include <linux/mlx4/qp.h>
44 45
45#include "mlx4_ib.h" 46#include "mlx4_ib.h"
46#include "user.h" 47#include "user.h"
47 48
49static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
50 struct mlx4_ib_cq *recv_cq);
51static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
52 struct mlx4_ib_cq *recv_cq);
53
48enum { 54enum {
49 MLX4_IB_ACK_REQ_FREQ = 8, 55 MLX4_IB_ACK_REQ_FREQ = 8,
50}; 56};
@@ -93,17 +99,6 @@ enum {
93#ifndef ETH_ALEN 99#ifndef ETH_ALEN
94#define ETH_ALEN 6 100#define ETH_ALEN 6
95#endif 101#endif
96static inline u64 mlx4_mac_to_u64(u8 *addr)
97{
98 u64 mac = 0;
99 int i;
100
101 for (i = 0; i < ETH_ALEN; i++) {
102 mac <<= 8;
103 mac |= addr[i];
104 }
105 return mac;
106}
107 102
108static const __be32 mlx4_ib_opcode[] = { 103static const __be32 mlx4_ib_opcode[] = {
109 [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), 104 [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
@@ -628,6 +623,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
628 struct mlx4_ib_sqp *sqp; 623 struct mlx4_ib_sqp *sqp;
629 struct mlx4_ib_qp *qp; 624 struct mlx4_ib_qp *qp;
630 enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type; 625 enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
626 struct mlx4_ib_cq *mcq;
627 unsigned long flags;
631 628
632 /* When tunneling special qps, we use a plain UD qp */ 629 /* When tunneling special qps, we use a plain UD qp */
633 if (sqpn) { 630 if (sqpn) {
@@ -838,6 +835,24 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
838 qp->mqp.event = mlx4_ib_qp_event; 835 qp->mqp.event = mlx4_ib_qp_event;
839 if (!*caller_qp) 836 if (!*caller_qp)
840 *caller_qp = qp; 837 *caller_qp = qp;
838
839 spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
840 mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
841 to_mcq(init_attr->recv_cq));
842 /* Maintain device-to-QP access, needed for further handling
843 * via the reset flow.
844 */
845 list_add_tail(&qp->qps_list, &dev->qp_list);
846 /* Maintain CQ-to-QP access, needed for further handling
847 * via the reset flow.
848 */
849 mcq = to_mcq(init_attr->send_cq);
850 list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
851 mcq = to_mcq(init_attr->recv_cq);
852 list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
853 mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
854 to_mcq(init_attr->recv_cq));
855 spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
841 return 0; 856 return 0;
842 857
843err_qpn: 858err_qpn:
@@ -896,13 +911,13 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv
896 __acquires(&send_cq->lock) __acquires(&recv_cq->lock) 911 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
897{ 912{
898 if (send_cq == recv_cq) { 913 if (send_cq == recv_cq) {
899 spin_lock_irq(&send_cq->lock); 914 spin_lock(&send_cq->lock);
900 __acquire(&recv_cq->lock); 915 __acquire(&recv_cq->lock);
901 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 916 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
902 spin_lock_irq(&send_cq->lock); 917 spin_lock(&send_cq->lock);
903 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); 918 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
904 } else { 919 } else {
905 spin_lock_irq(&recv_cq->lock); 920 spin_lock(&recv_cq->lock);
906 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); 921 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
907 } 922 }
908} 923}
@@ -912,13 +927,13 @@ static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *re
912{ 927{
913 if (send_cq == recv_cq) { 928 if (send_cq == recv_cq) {
914 __release(&recv_cq->lock); 929 __release(&recv_cq->lock);
915 spin_unlock_irq(&send_cq->lock); 930 spin_unlock(&send_cq->lock);
916 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 931 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
917 spin_unlock(&recv_cq->lock); 932 spin_unlock(&recv_cq->lock);
918 spin_unlock_irq(&send_cq->lock); 933 spin_unlock(&send_cq->lock);
919 } else { 934 } else {
920 spin_unlock(&send_cq->lock); 935 spin_unlock(&send_cq->lock);
921 spin_unlock_irq(&recv_cq->lock); 936 spin_unlock(&recv_cq->lock);
922 } 937 }
923} 938}
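The switch from spin_lock_irq() to plain spin_lock() in mlx4_ib_lock_cqs()/mlx4_ib_unlock_cqs() is safe because every caller now nests the CQ locks inside reset_flow_resource_lock, which is taken with spin_lock_irqsave() and therefore already has interrupts disabled. The intended nesting, as used by create_qp_common() and destroy_qp_common() above:

	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx4_ib_lock_cqs(send_cq, recv_cq);	/* plain spin_lock inside */
	/* ... add to / remove from qp_list, cq_send_list, cq_recv_list ... */
	mlx4_ib_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);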
924 939
@@ -963,6 +978,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
963 int is_user) 978 int is_user)
964{ 979{
965 struct mlx4_ib_cq *send_cq, *recv_cq; 980 struct mlx4_ib_cq *send_cq, *recv_cq;
981 unsigned long flags;
966 982
967 if (qp->state != IB_QPS_RESET) { 983 if (qp->state != IB_QPS_RESET) {
968 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), 984 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
@@ -994,8 +1010,13 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
994 1010
995 get_cqs(qp, &send_cq, &recv_cq); 1011 get_cqs(qp, &send_cq, &recv_cq);
996 1012
1013 spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
997 mlx4_ib_lock_cqs(send_cq, recv_cq); 1014 mlx4_ib_lock_cqs(send_cq, recv_cq);
998 1015
1016 /* del from lists under both locks above to protect reset flow paths */
1017 list_del(&qp->qps_list);
1018 list_del(&qp->cq_send_list);
1019 list_del(&qp->cq_recv_list);
999 if (!is_user) { 1020 if (!is_user) {
1000 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, 1021 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
1001 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL); 1022 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
@@ -1006,6 +1027,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
1006 mlx4_qp_remove(dev->dev, &qp->mqp); 1027 mlx4_qp_remove(dev->dev, &qp->mqp);
1007 1028
1008 mlx4_ib_unlock_cqs(send_cq, recv_cq); 1029 mlx4_ib_unlock_cqs(send_cq, recv_cq);
1030 spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
1009 1031
1010 mlx4_qp_free(dev->dev, &qp->mqp); 1032 mlx4_qp_free(dev->dev, &qp->mqp);
1011 1033
@@ -1915,6 +1937,22 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1915 goto out; 1937 goto out;
1916 } 1938 }
1917 1939
1940 if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
1941 if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
1942 if ((ibqp->qp_type == IB_QPT_RC) ||
1943 (ibqp->qp_type == IB_QPT_UD) ||
1944 (ibqp->qp_type == IB_QPT_UC) ||
1945 (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
1946 (ibqp->qp_type == IB_QPT_XRC_INI)) {
1947 attr->port_num = mlx4_ib_bond_next_port(dev);
1948 }
1949 } else {
1950 /* no sense in changing port_num
1951 * when ports are bonded */
1952 attr_mask &= ~IB_QP_PORT;
1953 }
1954 }
1955
1918 if ((attr_mask & IB_QP_PORT) && 1956 if ((attr_mask & IB_QP_PORT) &&
1919 (attr->port_num == 0 || attr->port_num > dev->num_ports)) { 1957 (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
1920 pr_debug("qpn 0x%x: invalid port number (%d) specified " 1958 pr_debug("qpn 0x%x: invalid port number (%d) specified "
@@ -1965,6 +2003,9 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1965 2003
1966 err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state); 2004 err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
1967 2005
2006 if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
2007 attr->port_num = 1;
2008
1968out: 2009out:
1969 mutex_unlock(&qp->mutex); 2010 mutex_unlock(&qp->mutex);
1970 return err; 2011 return err;
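Taken together, the two mlx4_ib_modify_qp() hunks implement the bonded-port policy: on the RESET->INIT transition the user's port choice is overridden with a round-robin pick (for the listed QP types); on any other transition IB_QP_PORT is simply masked out, since the port must not change while bonded; and after the low-level modify, attr->port_num is rewritten to 1, the only port a bonded device exposes, so the restore only fires in the RESET->INIT case where the mask survived. Condensed, with the qp_type whitelist omitted:

	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
		if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
			attr->port_num = mlx4_ib_bond_next_port(dev);
		else
			attr_mask &= ~IB_QP_PORT;	/* port is fixed while bonded */
	}

	err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
		attr->port_num = 1;	/* report the single bonded port back */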
@@ -2609,8 +2650,15 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2609 __be32 uninitialized_var(lso_hdr_sz); 2650 __be32 uninitialized_var(lso_hdr_sz);
2610 __be32 blh; 2651 __be32 blh;
2611 int i; 2652 int i;
2653 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
2612 2654
2613 spin_lock_irqsave(&qp->sq.lock, flags); 2655 spin_lock_irqsave(&qp->sq.lock, flags);
2656 if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
2657 err = -EIO;
2658 *bad_wr = wr;
2659 nreq = 0;
2660 goto out;
2661 }
2614 2662
2615 ind = qp->sq_next_wqe; 2663 ind = qp->sq_next_wqe;
2616 2664
@@ -2908,10 +2956,18 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2908 int ind; 2956 int ind;
2909 int max_gs; 2957 int max_gs;
2910 int i; 2958 int i;
2959 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
2911 2960
2912 max_gs = qp->rq.max_gs; 2961 max_gs = qp->rq.max_gs;
2913 spin_lock_irqsave(&qp->rq.lock, flags); 2962 spin_lock_irqsave(&qp->rq.lock, flags);
2914 2963
2964 if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
2965 err = -EIO;
2966 *bad_wr = wr;
2967 nreq = 0;
2968 goto out;
2969 }
2970
2915 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); 2971 ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
2916 2972
2917 for (nreq = 0; wr; ++nreq, wr = wr->next) { 2973 for (nreq = 0; wr; ++nreq, wr = wr->next) {
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 62d9285300af..dce5dfe3a70e 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -316,8 +316,15 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
316 int err = 0; 316 int err = 0;
317 int nreq; 317 int nreq;
318 int i; 318 int i;
319 struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);
319 320
320 spin_lock_irqsave(&srq->lock, flags); 321 spin_lock_irqsave(&srq->lock, flags);
322 if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
323 err = -EIO;
324 *bad_wr = wr;
325 nreq = 0;
326 goto out;
327 }
321 328
322 for (nreq = 0; wr; ++nreq, wr = wr->next) { 329 for (nreq = 0; wr; ++nreq, wr = wr->next) {
323 if (unlikely(wr->num_sge > srq->msrq.max_gs)) { 330 if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
@@ -362,6 +369,7 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
362 369
363 *srq->db.db = cpu_to_be32(srq->wqe_ctr); 370 *srq->db.db = cpu_to_be32(srq->wqe_ctr);
364 } 371 }
372out:
365 373
366 spin_unlock_irqrestore(&srq->lock, flags); 374 spin_unlock_irqrestore(&srq->lock, flags);
367 375
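The same guard now appears in all three posting paths, mlx4_ib_post_send(), mlx4_ib_post_recv() and mlx4_ib_post_srq_recv(): once the device has flagged MLX4_DEVICE_STATE_INTERNAL_ERROR, the whole WR chain is refused with -EIO, *bad_wr points at the first (unposted) request, and no doorbell is rung on the dead HCA. Reduced to its shape, with illustrative queue/wr types:

static int post_work(struct queue *q, struct wr *wr, struct wr **bad_wr)
{
	unsigned long flags;
	int err = 0;
	int nreq = 0;

	spin_lock_irqsave(&q->lock, flags);
	/* Fail fast: past an internal error the hardware cannot be trusted. */
	if (q->dev_state & DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;	/* nothing was posted; the first WR is "bad" */
		goto out;
	}

	for (; wr; wr = wr->next, ++nreq) {
		/* ... validate and queue each work request ... */
	}

	if (nreq)
		ring_doorbell(q);	/* only touch hardware on success */
out:
	spin_unlock_irqrestore(&q->lock, flags);
	return err;
}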
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index cb4c66e723b5..d10c2b8a5dad 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -375,7 +375,7 @@ static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max)
375 char base_name[9]; 375 char base_name[9];
376 376
377 /* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */ 377 /* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */
378 strlcpy(name, pci_name(dev->dev->pdev), max); 378 strlcpy(name, pci_name(dev->dev->persist->pdev), max);
379 strncpy(base_name, name, 8); /*till xxxx:yy:*/ 379 strncpy(base_name, name, 8); /*till xxxx:yy:*/
380 base_name[8] = '\0'; 380 base_name[8] = '\0';
381 /* with no ARI only 3 last bits are used so when the fn is higher than 8 381 /* with no ARI only 3 last bits are used so when the fn is higher than 8
@@ -792,7 +792,7 @@ static int register_pkey_tree(struct mlx4_ib_dev *device)
792 if (!mlx4_is_master(device->dev)) 792 if (!mlx4_is_master(device->dev))
793 return 0; 793 return 0;
794 794
795 for (i = 0; i <= device->dev->num_vfs; ++i) 795 for (i = 0; i <= device->dev->persist->num_vfs; ++i)
796 register_one_pkey_tree(device, i); 796 register_one_pkey_tree(device, i);
797 797
798 return 0; 798 return 0;
@@ -807,7 +807,7 @@ static void unregister_pkey_tree(struct mlx4_ib_dev *device)
807 if (!mlx4_is_master(device->dev)) 807 if (!mlx4_is_master(device->dev))
808 return; 808 return;
809 809
810 for (slave = device->dev->num_vfs; slave >= 0; --slave) { 810 for (slave = device->dev->persist->num_vfs; slave >= 0; --slave) {
811 list_for_each_entry_safe(p, t, 811 list_for_each_entry_safe(p, t,
812 &device->pkeys.pkey_port_list[slave], 812 &device->pkeys.pkey_port_list[slave],
813 entry) { 813 entry) {
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index b56e4c5593ee..611a9fdf2f38 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -81,7 +81,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
81 for (k = 0; k < len; k++) { 81 for (k = 0; k < len; k++) {
82 if (!(i & mask)) { 82 if (!(i & mask)) {
83 tmp = (unsigned long)pfn; 83 tmp = (unsigned long)pfn;
84 m = min(m, find_first_bit(&tmp, sizeof(tmp))); 84 m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp)));
85 skip = 1 << m; 85 skip = 1 << m;
86 mask = skip - 1; 86 mask = skip - 1;
87 base = pfn; 87 base = pfn;
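The mlx5 change is a type fix: m is an int while find_first_bit() returns unsigned long, and the kernel's min() rejects mismatched operand types, so min_t() forces both sides to unsigned long. Separately, it may be worth double-checking the width argument: find_first_bit() takes a size in bits, while sizeof(tmp) is a byte count (8 on 64-bit), which silently caps the result at 8. If a full-word search was intended, the call would be:

	/* Speculative variant, not the committed code: search all bits of tmp. */
	m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));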
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 49eb5111d2cd..70acda91eb2a 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -373,11 +373,11 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
373 wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX]; 373 wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
374 374
375 /* setup the VLAN tag if present */ 375 /* setup the VLAN tag if present */
376 if (vlan_tx_tag_present(skb)) { 376 if (skb_vlan_tag_present(skb)) {
377 nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n", 377 nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
378 netdev->name, vlan_tx_tag_get(skb)); 378 netdev->name, skb_vlan_tag_get(skb));
379 wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE; 379 wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
380 wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb); 380 wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb);
381 } else 381 } else
382 wqe_misc = 0; 382 wqe_misc = 0;
383 383
@@ -576,11 +576,12 @@ tso_sq_no_longer_full:
576 wqe_fragment_length = 576 wqe_fragment_length =
577 (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX]; 577 (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
578 /* setup the VLAN tag if present */ 578 /* setup the VLAN tag if present */
579 if (vlan_tx_tag_present(skb)) { 579 if (skb_vlan_tag_present(skb)) {
580 nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n", 580 nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
581 netdev->name, vlan_tx_tag_get(skb) ); 581 netdev->name,
582 skb_vlan_tag_get(skb));
582 wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE; 583 wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
583 wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb); 584 wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb);
584 } else 585 } else
585 wqe_misc = 0; 586 wqe_misc = 0;
586 587
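The nes hunks are part of a tree-wide rename of the vlan_tx_tag_* helpers to skb_vlan_tag_*; the old names wrongly suggested the tag was TX-only, when it simply lives in skb->vlan_tci. Typical usage after the rename:

	if (skb_vlan_tag_present(skb)) {
		u16 tci = skb_vlan_tag_get(skb);	/* VLAN ID plus priority bits */

		/* ... program tci into the TX descriptor ... */
	}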